code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
'''simple docstring''' import os import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers.models.realm.configuration_realm import RealmConfig from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer class __A ( _lowercase ): def _lowercase (self : List[str] ): UpperCAmelCase_ = tempfile.mkdtemp() UpperCAmelCase_ = 5 # Realm tok UpperCAmelCase_ = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''test''', '''question''', '''this''', '''is''', '''the''', '''first''', '''second''', '''third''', '''fourth''', '''fifth''', '''record''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] UpperCAmelCase_ = os.path.join(self.tmpdirname , "realm_tokenizer" ) os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase ) UpperCAmelCase_ = os.path.join(__UpperCAmelCase , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) UpperCAmelCase_ = os.path.join(self.tmpdirname , "realm_block_records" ) os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase ) def _lowercase (self : List[Any] ): return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) ) def _lowercase (self : Optional[int] ): shutil.rmtree(self.tmpdirname ) def _lowercase (self : Tuple ): UpperCAmelCase_ = RealmConfig(num_block_records=self.num_block_records ) return config def _lowercase (self : int ): UpperCAmelCase_ = Dataset.from_dict( { "id": ["0", "1"], "question": ["foo", "bar"], "answers": [["Foo", "Bar"], ["Bar"]], } ) return dataset def _lowercase (self : Optional[Any] ): UpperCAmelCase_ = np.array( [ B"This is the first record", B"This is the second 
record", B"This is the third record", B"This is the fourth record", B"This is the fifth record", B"This is a longer longer longer record", ] , dtype=__UpperCAmelCase , ) return block_records def _lowercase (self : Any ): UpperCAmelCase_ = RealmRetriever( block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , ) return retriever def _lowercase (self : Dict ): UpperCAmelCase_ = self.get_config() UpperCAmelCase_ = self.get_dummy_retriever() UpperCAmelCase_ = retriever.tokenizer UpperCAmelCase_ = np.array([0, 3] , dtype="long" ) UpperCAmelCase_ = tokenizer(["Test question"] ).input_ids UpperCAmelCase_ = tokenizer( ["the fourth"] , add_special_tokens=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , ).input_ids UpperCAmelCase_ = config.reader_seq_len UpperCAmelCase_ = retriever( __UpperCAmelCase , __UpperCAmelCase , answer_ids=__UpperCAmelCase , max_length=__UpperCAmelCase , return_tensors="np" ) self.assertEqual(len(__UpperCAmelCase ) , 2 ) self.assertEqual(len(__UpperCAmelCase ) , 2 ) self.assertEqual(len(__UpperCAmelCase ) , 2 ) self.assertEqual(concat_inputs.input_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) ) self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , ) def _lowercase (self : int ): UpperCAmelCase_ = self.get_config() UpperCAmelCase_ = self.get_dummy_retriever() UpperCAmelCase_ = retriever.tokenizer UpperCAmelCase_ = np.array([0, 3, 5] , dtype="long" ) UpperCAmelCase_ = tokenizer(["Test question"] ).input_ids UpperCAmelCase_ = 
tokenizer( ["the fourth", "longer longer"] , add_special_tokens=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , ).input_ids UpperCAmelCase_ = config.reader_seq_len UpperCAmelCase_ = retriever( __UpperCAmelCase , __UpperCAmelCase , answer_ids=__UpperCAmelCase , max_length=__UpperCAmelCase , return_tensors="np" ) self.assertEqual([False, True, True] , __UpperCAmelCase ) self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __UpperCAmelCase ) self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __UpperCAmelCase ) def _lowercase (self : Optional[int] ): UpperCAmelCase_ = self.get_dummy_retriever() retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) ) # Test local path UpperCAmelCase_ = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) ) self.assertEqual(retriever.block_records[0] , B"This is the first record" ) # Test mocked remote path with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download: UpperCAmelCase_ = os.path.join( os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME ) UpperCAmelCase_ = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" ) self.assertEqual(retriever.block_records[0] , B"This is the first record" )
78
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCAmelCase_ = { """configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ """NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""", """NezhaForNextSentencePrediction""", """NezhaForMaskedLM""", """NezhaForPreTraining""", """NezhaForMultipleChoice""", """NezhaForQuestionAnswering""", """NezhaForSequenceClassification""", """NezhaForTokenClassification""", """NezhaModel""", """NezhaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nezha import ( NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, NezhaPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
678
0
SCREAMING_SNAKE_CASE :Dict = tuple[float, float, float] SCREAMING_SNAKE_CASE :Tuple = tuple[float, float, float] def _lowerCAmelCase ( lowerCAmelCase_ :Optional[int] , lowerCAmelCase_ :Optional[Any] )->Vectorad: '''simple docstring''' snake_case_ = end_pointa[0] - end_pointa[0] snake_case_ = end_pointa[1] - end_pointa[1] snake_case_ = end_pointa[2] - end_pointa[2] return (x, y, z) def _lowerCAmelCase ( lowerCAmelCase_ :Any , lowerCAmelCase_ :Union[str, Any] )->Vectorad: '''simple docstring''' snake_case_ = ab[1] * ac[2] - ab[2] * ac[1] # *i snake_case_ = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j snake_case_ = ab[0] * ac[1] - ab[1] * ac[0] # *k return (x, y, z) def _lowerCAmelCase ( lowerCAmelCase_ :Union[str, Any] , lowerCAmelCase_ :Tuple )->bool: '''simple docstring''' return tuple(round(lowerCAmelCase_ , lowerCAmelCase_ ) for x in vector ) == (0, 0, 0) def _lowerCAmelCase ( lowerCAmelCase_ :Union[str, Any] , lowerCAmelCase_ :Optional[int] , lowerCAmelCase_ :Optional[int] , lowerCAmelCase_ :Union[str, Any] = 10 )->bool: '''simple docstring''' snake_case_ = create_vector(lowerCAmelCase_ , lowerCAmelCase_ ) snake_case_ = create_vector(lowerCAmelCase_ , lowerCAmelCase_ ) return is_zero_vector(get_ad_vectors_cross(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ )
283
import tempfile import unittest import numpy as np from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import BertConfig, is_flax_available from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax if is_flax_available(): import os from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FlaxBertModel lowerCAmelCase_ = """0.12""" # assumed parallelism: 8 @require_flax @is_staging_test class _lowerCAmelCase ( unittest.TestCase ): @classmethod def __magic_name__( cls ): lowerCAmelCase__ : Dict = TOKEN HfFolder.save_token(__UpperCAmelCase ) @classmethod def __magic_name__( cls ): try: delete_repo(token=cls._token , repo_id='''test-model-flax''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' ) except HTTPError: pass def __magic_name__( self ): lowerCAmelCase__ : List[Any] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) lowerCAmelCase__ : List[Any] = FlaxBertModel(__UpperCAmelCase ) model.push_to_hub('''test-model-flax''' , use_auth_token=self._token ) lowerCAmelCase__ : List[Any] = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" ) lowerCAmelCase__ : Union[str, Any] = flatten_dict(unfreeze(model.params ) ) lowerCAmelCase__ : List[Any] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowerCAmelCase__ : str = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" ) # Reset repo delete_repo(token=self._token , repo_id='''test-model-flax''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(__UpperCAmelCase , repo_id='''test-model-flax''' , push_to_hub=__UpperCAmelCase , use_auth_token=self._token ) lowerCAmelCase__ : Tuple = 
FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" ) lowerCAmelCase__ : List[str] = flatten_dict(unfreeze(model.params ) ) lowerCAmelCase__ : Optional[int] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowerCAmelCase__ : Union[str, Any] = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" ) def __magic_name__( self ): lowerCAmelCase__ : List[str] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) lowerCAmelCase__ : Optional[Any] = FlaxBertModel(__UpperCAmelCase ) model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token ) lowerCAmelCase__ : str = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' ) lowerCAmelCase__ : List[str] = flatten_dict(unfreeze(model.params ) ) lowerCAmelCase__ : Dict = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowerCAmelCase__ : Tuple = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( __UpperCAmelCase , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=__UpperCAmelCase , use_auth_token=self._token ) lowerCAmelCase__ : Union[str, Any] = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' ) lowerCAmelCase__ : Optional[int] = flatten_dict(unfreeze(model.params ) ) lowerCAmelCase__ : List[str] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowerCAmelCase__ : Tuple = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Tuple: lowerCAmelCase__ 
: Any = True lowerCAmelCase__ : Any = flatten_dict(modela.params ) lowerCAmelCase__ : List[str] = flatten_dict(modela.params ) for key in flat_params_a.keys(): if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4: lowerCAmelCase__ : Optional[Any] = False return models_are_equal @require_flax class _lowerCAmelCase ( unittest.TestCase ): def __magic_name__( self ): lowerCAmelCase__ : List[str] = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) lowerCAmelCase__ : List[str] = FlaxBertModel(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Optional[int] = FlaxBertModel.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : List[str] = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase ) self.assertTrue(check_models_equal(__UpperCAmelCase , __UpperCAmelCase ) ) def __magic_name__( self ): lowerCAmelCase__ : Tuple = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) lowerCAmelCase__ : Union[str, Any] = FlaxBertModel(__UpperCAmelCase ) lowerCAmelCase__ : Dict = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , max_shard_size='''10KB''' ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Tuple = FlaxBertModel.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : Any = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase ) self.assertTrue(check_models_equal(__UpperCAmelCase , __UpperCAmelCase ) ) def __magic_name__( self ): lowerCAmelCase__ : List[str] = '''bert''' lowerCAmelCase__ : int = '''hf-internal-testing/tiny-random-bert-subfolder''' with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Dict = FlaxBertModel.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : 
Optional[int] = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : List[Any] = '''bert''' lowerCAmelCase__ : Tuple = '''hf-internal-testing/tiny-random-bert-sharded-subfolder''' with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Union[str, Any] = FlaxBertModel.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase )
678
0
"""simple docstring""" import inspect import os import unittest from dataclasses import dataclass import torch from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs from accelerate.state import AcceleratorState from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu from accelerate.utils import KwargsHandler @dataclass class lowerCamelCase__ ( _lowercase ): __UpperCAmelCase = 0 __UpperCAmelCase = False __UpperCAmelCase = 3.0 class lowerCamelCase__ ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" # If no defaults are changed, `to_kwargs` returns an empty dict. self.assertDictEqual(MockClass().to_kwargs() , {} ) self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"""a""": 2} ) self.assertDictEqual(MockClass(a=2 , b=__UpperCAmelCase ).to_kwargs() , {"""a""": 2, """b""": True} ) self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"""a""": 2, """c""": 2.25} ) @require_cuda def _UpperCAmelCase ( self ) -> Optional[int]: """simple docstring""" # If no defaults are changed, `to_kwargs` returns an empty dict. 
lowercase : int = GradScalerKwargs(init_scale=1_0_2_4 , growth_factor=2 ) AcceleratorState._reset_state() lowercase : Tuple = Accelerator(mixed_precision="""fp16""" , kwargs_handlers=[scaler_handler] ) print(accelerator.use_fpaa ) lowercase : Dict = accelerator.scaler # Check the kwargs have been applied self.assertEqual(scaler._init_scale , 10_24.0 ) self.assertEqual(scaler._growth_factor , 2.0 ) # Check the other values are at the default self.assertEqual(scaler._backoff_factor , 0.5 ) self.assertEqual(scaler._growth_interval , 2_0_0_0 ) self.assertEqual(scaler._enabled , __UpperCAmelCase ) @require_multi_gpu def _UpperCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" lowercase : Union[str, Any] = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )] execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() ) if __name__ == "__main__": lowerCAmelCase: Any =DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True) lowerCAmelCase: List[Any] =Accelerator(kwargs_handlers=[ddp_scaler]) lowerCAmelCase: str =torch.nn.Linear(100, 200) lowerCAmelCase: Dict =accelerator.prepare(model) # Check the values changed in kwargs lowerCAmelCase: Optional[Any] ="" lowerCAmelCase: int =model.bucket_bytes_cap // (1024 * 1024) if observed_bucket_cap_map != 15: error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n" if model.find_unused_parameters is not True: error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n" # Check the values of the defaults if model.dim != 0: error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n" if model.broadcast_buffers is not True: error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n" if model.gradient_as_bucket_view is not False: error_msg += F"Default value not respected, should have `False` but found 
{model.gradient_as_bucket_view}.\n" # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
607
from random import randint from tempfile import TemporaryFile import numpy as np def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Any: lowerCAmelCase__ : Optional[Any] = 0 if start < end: lowerCAmelCase__ : Union[str, Any] = randint(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : List[str] = a[end] lowerCAmelCase__ : List[Any] = a[pivot] lowerCAmelCase__ : str = temp lowerCAmelCase__ , lowerCAmelCase__ : Tuple = _in_place_partition(UpperCamelCase , UpperCamelCase , UpperCamelCase ) count += _in_place_quick_sort(UpperCamelCase , UpperCamelCase , p - 1 ) count += _in_place_quick_sort(UpperCamelCase , p + 1 , UpperCamelCase ) return count def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[Any]: lowerCAmelCase__ : List[str] = 0 lowerCAmelCase__ : List[Any] = randint(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : str = a[end] lowerCAmelCase__ : Optional[int] = a[pivot] lowerCAmelCase__ : List[str] = temp lowerCAmelCase__ : str = start - 1 for index in range(UpperCamelCase , UpperCamelCase ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value lowerCAmelCase__ : List[str] = new_pivot_index + 1 lowerCAmelCase__ : int = a[new_pivot_index] lowerCAmelCase__ : int = a[index] lowerCAmelCase__ : Tuple = temp lowerCAmelCase__ : Optional[Any] = a[new_pivot_index + 1] lowerCAmelCase__ : List[str] = a[end] lowerCAmelCase__ : Union[str, Any] = temp return new_pivot_index + 1, count lowerCAmelCase_ = TemporaryFile() lowerCAmelCase_ = 1_00 # 1000 elements are to be sorted lowerCAmelCase_ , lowerCAmelCase_ = 0, 1 # mean and standard deviation lowerCAmelCase_ = np.random.normal(mu, sigma, p) np.save(outfile, X) print("""The array is""") print(X) outfile.seek(0) # using the same array lowerCAmelCase_ = np.load(outfile) lowerCAmelCase_ = len(M) - 1 lowerCAmelCase_ = _in_place_quick_sort(M, 0, r) print( """No of Comparisons for 100 elements selected from a standard normal distribution""" 
"""is :""" ) print(z)
678
0
"""simple docstring""" from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def __lowerCAmelCase ( lowercase : List[Any] ) -> Dict: """simple docstring""" return ConvertCommand( args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name ) __snake_case = """ transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions. """ class _lowerCAmelCase ( _lowercase ): @staticmethod def lowerCamelCase ( UpperCamelCase__ ) -> Dict: '''simple docstring''' snake_case : int = parser.add_parser( "convert" , help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints." , ) train_parser.add_argument("--model_type" , type=__UpperCAmelCase , required=__UpperCAmelCase , help="Model\'s type." ) train_parser.add_argument( "--tf_checkpoint" , type=__UpperCAmelCase , required=__UpperCAmelCase , help="TensorFlow checkpoint path or folder." ) train_parser.add_argument( "--pytorch_dump_output" , type=__UpperCAmelCase , required=__UpperCAmelCase , help="Path to the PyTorch saved model output." ) train_parser.add_argument("--config" , type=__UpperCAmelCase , default="" , help="Configuration file path or folder." ) train_parser.add_argument( "--finetuning_task_name" , type=__UpperCAmelCase , default=__UpperCAmelCase , help="Optional fine-tuning task name if the TF model was a finetuned model." 
, ) train_parser.set_defaults(func=__UpperCAmelCase ) def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ , ) -> Union[str, Any]: '''simple docstring''' snake_case : str = logging.get_logger("transformers-cli/converting" ) self._logger.info(F'Loading model {model_type}' ) snake_case : Any = model_type snake_case : Dict = tf_checkpoint snake_case : Optional[Any] = pytorch_dump_output snake_case : Tuple = config snake_case : List[str] = finetuning_task_name def lowerCamelCase ( self ) -> int: '''simple docstring''' if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(__UpperCAmelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(__UpperCAmelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(__UpperCAmelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(__UpperCAmelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, ) 
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "transfo_xl": try: from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(__UpperCAmelCase ) if "ckpt" in self._tf_checkpoint.lower(): snake_case : Dict = self._tf_checkpoint snake_case : Any = '''''' else: snake_case : List[Any] = self._tf_checkpoint snake_case : Union[str, Any] = '''''' convert_transfo_xl_checkpoint_to_pytorch( __UpperCAmelCase , self._config , self._pytorch_dump_output , __UpperCAmelCase ) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(__UpperCAmelCase ) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(__UpperCAmelCase ) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name ) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , 
self._config , self._pytorch_dump_output ) else: raise ValueError( "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
178
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Optional[int]: assert isinstance(UpperCamelCase , UpperCamelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]: lowerCAmelCase__ : List[str] = tmp_path / '''cache''' lowerCAmelCase__ : Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCAmelCase__ : List[Any] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase , keep_in_memory=UpperCamelCase ).read() _check_parquet_dataset(UpperCamelCase , UpperCamelCase ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]: lowerCAmelCase__ : str = tmp_path / '''cache''' lowerCAmelCase__ : Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} 
lowerCAmelCase__ : str = features.copy() if features else default_expected_features lowerCAmelCase__ : List[Any] = ( Features({feature: Value(UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase__ : Union[str, Any] = ParquetDatasetReader(UpperCamelCase , features=UpperCamelCase , cache_dir=UpperCamelCase ).read() _check_parquet_dataset(UpperCamelCase , UpperCamelCase ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict: lowerCAmelCase__ : str = tmp_path / '''cache''' lowerCAmelCase__ : Tuple = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase__ : Union[str, Any] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase , split=UpperCamelCase ).read() _check_parquet_dataset(UpperCamelCase , UpperCamelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''' , [str, list] ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str: if issubclass(UpperCamelCase , UpperCamelCase ): lowerCAmelCase__ : Any = parquet_path elif issubclass(UpperCamelCase , UpperCamelCase ): lowerCAmelCase__ : Any = [parquet_path] lowerCAmelCase__ : int = tmp_path / '''cache''' lowerCAmelCase__ : str = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase__ : Union[str, Any] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase ).read() _check_parquet_dataset(UpperCamelCase , UpperCamelCase ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase=("train",) ) -> str: assert isinstance(UpperCamelCase , UpperCamelCase ) for split in splits: lowerCAmelCase__ : str = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, 
expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[int]: lowerCAmelCase__ : Any = tmp_path / '''cache''' lowerCAmelCase__ : Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCAmelCase__ : Optional[Any] = ParquetDatasetReader( {'''train''': parquet_path} , cache_dir=UpperCamelCase , keep_in_memory=UpperCamelCase ).read() _check_parquet_datasetdict(UpperCamelCase , UpperCamelCase ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: lowerCAmelCase__ : Any = tmp_path / '''cache''' lowerCAmelCase__ : Tuple = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase__ : Tuple = features.copy() if features else default_expected_features lowerCAmelCase__ : Optional[int] = ( Features({feature: Value(UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase__ : List[str] = ParquetDatasetReader({'''train''': parquet_path} , features=UpperCamelCase , cache_dir=UpperCamelCase ).read() _check_parquet_datasetdict(UpperCamelCase , UpperCamelCase ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict: if 
split: lowerCAmelCase__ : Tuple = {split: parquet_path} else: lowerCAmelCase__ : int = '''train''' lowerCAmelCase__ : List[Any] = {'''train''': parquet_path, '''test''': parquet_path} lowerCAmelCase__ : Optional[int] = tmp_path / '''cache''' lowerCAmelCase__ : List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase__ : List[str] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase ).read() _check_parquet_datasetdict(UpperCamelCase , UpperCamelCase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Tuple: lowerCAmelCase__ : Optional[Any] = ParquetDatasetWriter(UpperCamelCase , tmp_path / '''foo.parquet''' ) assert writer.write() > 0 lowerCAmelCase__ : Union[str, Any] = pq.ParquetFile(tmp_path / '''foo.parquet''' ) lowerCAmelCase__ : int = pf.read() assert dataset.data.table == output_table def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Tuple: lowerCAmelCase__ : List[str] = str(shared_datadir / '''test_image_rgb.jpg''' ) lowerCAmelCase__ : Dict = {'''image''': [image_path]} lowerCAmelCase__ : int = Features({'''image''': Image()} ) lowerCAmelCase__ : Dict = Dataset.from_dict(UpperCamelCase , features=UpperCamelCase ) lowerCAmelCase__ : List[str] = ParquetDatasetWriter(UpperCamelCase , tmp_path / '''foo.parquet''' ) assert writer.write() > 0 lowerCAmelCase__ : Dict = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) ) assert dataset.features == reloaded_dataset.features lowerCAmelCase__ : int = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ) , streaming=UpperCamelCase ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( '''feature, expected''' , [ (Features({'''foo''': Value('''int32''' )} ), None), (Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), 
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Any: assert get_writer_batch_size(UpperCamelCase ) == expected
678
0
import sys


def get_mid(point_a: tuple[float, float], point_b: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of the segment joining *point_a* and *point_b*."""
    return (point_a[0] + point_b[0]) / 2, (point_a[1] + point_b[1]) / 2


def triangle(
    vertex_a: tuple[float, float],
    vertex_b: tuple[float, float],
    vertex_c: tuple[float, float],
    depth: int,
) -> None:
    """Recursively draw a Sierpinski triangle with the global turtle pen ``my_pen``.

    Draws the outline of (vertex_a, vertex_b, vertex_c), then recurses into the
    three corner sub-triangles (each formed by a vertex and the midpoints of its
    two adjacent sides) until *depth* reaches 0.
    """
    # Trace the outline of the current triangle without drawing the travel move.
    my_pen.up()
    my_pen.goto(vertex_a[0], vertex_a[1])
    my_pen.down()
    my_pen.goto(vertex_b[0], vertex_b[1])
    my_pen.goto(vertex_c[0], vertex_c[1])
    my_pen.goto(vertex_a[0], vertex_a[1])

    if depth == 0:
        return

    # One sub-triangle per corner; midpoints shrink the side length by half.
    triangle(vertex_a, get_mid(vertex_a, vertex_b), get_mid(vertex_a, vertex_c), depth - 1)
    triangle(vertex_b, get_mid(vertex_b, vertex_a), get_mid(vertex_b, vertex_c), depth - 1)
    triangle(vertex_c, get_mid(vertex_c, vertex_a), get_mid(vertex_c, vertex_b), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    # Imported here (not at module top) so the module stays importable in
    # headless environments where tkinter/turtle has no display.
    import turtle

    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
323
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for a FocalNet model.

    Stores the architecture hyper-parameters (patch embedding, per-stage depths,
    focal levels/windows, regularization, backbone output selection) used to
    instantiate a FocalNet model. Inherits serialization and common-config
    behavior from `PretrainedConfig`, and backbone feature selection from
    `BackboneConfigMixin`.
    """

    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        """Build the config; unknown kwargs are forwarded to `PretrainedConfig`.

        `out_features`/`out_indices` select which stages a backbone exposes;
        they are reconciled against `stage_names` below.
        """
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        # One "stem" plus one named stage per entry in `depths`.
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
678
0
import time
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers.generation import (
        MaxLengthCriteria,
        MaxNewTokensCriteria,
        MaxTimeCriteria,
        StoppingCriteriaList,
        validate_stopping_criteria,
    )


@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    """Tests for the generation stopping criteria (length, new-token, and time limits)."""

    def _get_tensors(self, length):
        """Return (input_ids, scores) fixtures of shape (3, length)."""
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        # A list of criteria triggers as soon as any member triggers.
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        # start_length + max_new_tokens == 10, so sequences of length 10 stop.
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        # Pretend generation started 0.2s ago, past the 0.1s budget.
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        # Mismatched max_length should warn rather than fail.
        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        # An empty list gets a MaxLengthCriteria appended for us.
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
33
from scipy.stats import pearsonr

import datasets


_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""


_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted class labels, as returned by a model.
    references (`list` of `int`): Ground truth labels.
    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.

Returns:
    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.

Examples:

    Example 1-A simple example using only predictions and references.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
        >>> print(round(results['pearsonr'], 2))
        -0.74

    Example 2-The same as Example 1, but that also returns the `p-value`.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
        >>> print(sorted(list(results.keys())))
        ['p-value', 'pearsonr']
        >>> print(round(results['pearsonr'], 2))
        -0.74
        >>> print(round(results['p-value'], 2))
        0.15
"""


_CITATION = """
@article{2020SciPy-NMeth,
author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
          Haberland, Matt and Reddy, Tyler and Cournapeau, David and
          Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
          Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
          Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
          Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
          Kern, Robert and Larson, Eric and Carey, C J and
          Polat, Ilhan and Feng, Yu and Moore, Eric W. and
          {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
          Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
          Harris, Charles R. and Archibald, Anne M. and
          Ribeiro, Antonio H. and Pedregosa, Fabian and
          {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
          Computing in Python}},
journal = {Nature Methods},
year    = {2020},
volume  = {17},
pages   = {261--272},
adsurl  = {https://rdcu.be/b08Wh},
doi     = {10.1038/s41592-019-0686-2},
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    """Pearson correlation coefficient metric backed by `scipy.stats.pearsonr`."""

    def _info(self):
        # Declares the metric's input schema and documentation for `datasets`.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        """Compute the Pearson correlation; optionally include the p-value."""
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            # pearsonr returns (coefficient, p-value); keep only the coefficient.
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
678
0
import json
import os
from typing import Optional

import numpy as np

from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer


logger = logging.get_logger(__name__)


class BarkProcessor(ProcessorMixin):
    """Processor for Bark: wraps a tokenizer and optional speaker-embedding voice presets.

    `speaker_embeddings` is a dict mapping preset names to per-prompt-type
    relative `.npy` paths, plus a `"repo_or_path"` entry pointing at the repo
    or local directory the paths are relative to.
    """

    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    # Expected ndarray rank for each component of a voice preset.
    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)

        self.speaker_embeddings = speaker_embeddings

    @classmethod
    def from_pretrained(
        cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs
    ):
        """Load the tokenizer and (optionally) the speaker-embeddings index JSON.

        Pass `speaker_embeddings_dict_path=None` to skip loading voice presets.
        Remaining kwargs are forwarded to `AutoTokenizer.from_pretrained`.
        """
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                # Missing index is non-fatal: warn and continue without presets.
                logger.warning(
                    f"""`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exists
                    , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`."""
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)

        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)

    def save_pretrained(
        self,
        save_directory,
        speaker_embeddings_dict_path="speaker_embeddings_path.json",
        speaker_embeddings_directory="speaker_embeddings",
        push_to_hub=False,
        **kwargs,
    ):
        """Save tokenizer and voice presets; presets are materialized as `.npy` files
        plus a JSON index of relative paths."""
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)

            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    # Resolve the preset's arrays, then re-save each under the
                    # output directory and record its relative path.
                    voice_preset = self._load_voice_preset(prompt_key)

                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}"
                            ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy")

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)

        super().save_pretrained(save_directory, push_to_hub, **kwargs)

    def _load_voice_preset(self, voice_preset=None, **kwargs):
        """Fetch and load the three prompt arrays for a named voice preset."""
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."
                )

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f"""`{os.path.join(self.speaker_embeddings.get("repo_or_path", "/"), voice_preset_paths[key])}` does not exists
                    , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings."""
                )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict

    def _validate_voice_preset_dict(self, voice_preset=None, **kwargs):
        """Check a voice-preset dict has all prompt keys with correctly-ranked ndarrays."""
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors="pt",
        max_length=256,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        """Tokenize *text* and attach the resolved voice preset as `history_prompt`.

        `voice_preset` may be a dict of arrays, a name registered in
        `self.speaker_embeddings`, or a path to an `.npz` file.
        """
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)

            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"

                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
619
from manim import *


class _lowerCAmelCase ( _lowercase ):

    def __magic_name__( self ):
        """Animate how checkpoint weights are staged across CPU/GPU/disk.

        NOTE(review): this file appears machine-mangled — nearly every local is
        assigned to the same name `lowerCAmelCase__` and call arguments were
        replaced by the undefined `__UpperCAmelCase`, so the scene cannot run as
        written. Comments below describe the apparent intent; the original
        variable names (cpu_left_col_base, model_cpu_arr, ...) are still
        visible in the un-mangled references.
        """
        # Base mobjects: a memory cell, a smaller "meta" cell, and a fill template.
        lowerCAmelCase__ : Tuple = Rectangle(height=0.5 , width=0.5 )
        lowerCAmelCase__ : Dict = Rectangle(height=0.25 , width=0.25 )
        lowerCAmelCase__ : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # CPU: two columns of 6 memory cells with a label.
        lowerCAmelCase__ : Optional[Any] = [mem.copy() for i in range(6 )]
        lowerCAmelCase__ : int = [mem.copy() for i in range(6 )]
        lowerCAmelCase__ : Optional[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : str = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : List[str] = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : int = Text('''CPU''' , font_size=24 )
        lowerCAmelCase__ : int = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__UpperCAmelCase )
        # GPU: a single column of 4 cells with a label.
        lowerCAmelCase__ : Optional[int] = [mem.copy() for i in range(4 )]
        lowerCAmelCase__ : Tuple = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : Tuple = Text('''GPU''' , font_size=24 )
        lowerCAmelCase__ : int = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        gpu.move_to([-1, -1, 0] )
        self.add(__UpperCAmelCase )
        # Model: a row of 6 cells with a label.
        lowerCAmelCase__ : int = [mem.copy() for i in range(6 )]
        lowerCAmelCase__ : List[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : Tuple = Text('''Model''' , font_size=24 )
        lowerCAmelCase__ : List[Any] = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        model.move_to([3, -1.0, 0] )
        self.add(__UpperCAmelCase )
        # Place one small target per model cell next to the CPU columns
        # (presumably tracking which weights live on CPU — TODO confirm).
        lowerCAmelCase__ : Dict = []
        lowerCAmelCase__ : List[str] = []
        lowerCAmelCase__ : Optional[Any] = []
        for i, rect in enumerate(__UpperCAmelCase ):
            rect.set_stroke(__UpperCAmelCase )
            lowerCAmelCase__ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCAmelCase )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0] , direction=__UpperCAmelCase , buff=0.0 )
            else:
                cpu_target.next_to(model_cpu_arr[i - 1] , direction=__UpperCAmelCase , buff=0.0 )
            self.add(__UpperCAmelCase )
            model_cpu_arr.append(__UpperCAmelCase )
        self.add(*__UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase )
        # Loaded checkpoint: a 6-cell block above the model.
        lowerCAmelCase__ : Any = [mem.copy() for i in range(6 )]
        lowerCAmelCase__ : Optional[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : Any = Text('''Loaded Checkpoint''' , font_size=24 )
        lowerCAmelCase__ : Optional[Any] = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        checkpoint.move_to([3, 0.5, 0] )
        self.add(__UpperCAmelCase )
        # Fill markers for each checkpoint cell, mirrored onto the CPU columns.
        lowerCAmelCase__ : Dict = []
        lowerCAmelCase__ : str = []
        for i, rect in enumerate(__UpperCAmelCase ):
            lowerCAmelCase__ : Union[str, Any] = fill.copy().set_fill(__UpperCAmelCase , opacity=0.7 )
            target.move_to(__UpperCAmelCase )
            ckpt_arr.append(__UpperCAmelCase )
            lowerCAmelCase__ : List[str] = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5] )
            ckpt_cpu_arr.append(__UpperCAmelCase )
        self.add(*__UpperCAmelCase , *__UpperCAmelCase )
        # Legend: a key square plus colored bullet captions.
        lowerCAmelCase__ : Optional[Any] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        lowerCAmelCase__ : List[Any] = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(__UpperCAmelCase , __UpperCAmelCase )
        lowerCAmelCase__ : List[str] = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
        blue_text.next_to(__UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(__UpperCAmelCase )
        # Step 1 caption, then the disk block (6+6 meta cells in two columns).
        lowerCAmelCase__ : str = MarkupText(
            f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        lowerCAmelCase__ : Optional[Any] = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase__ : Dict = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase__ : Union[str, Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : Dict = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : str = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : List[str] = Text('''Disk''' , font_size=24 )
        lowerCAmelCase__ : Any = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        disk.move_to([-4.0, -1.25, 0] )
        self.play(Write(__UpperCAmelCase , run_time=3 ) , Write(__UpperCAmelCase , run_time=1 ) , Create(__UpperCAmelCase , run_time=1 ) )
        # Animate checkpoint cells shrinking onto the disk column.
        lowerCAmelCase__ : str = []
        for i, rect in enumerate(__UpperCAmelCase ):
            lowerCAmelCase__ : Dict = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
            animations.append(MoveToTarget(__UpperCAmelCase , run_time=1.5 ) )
        self.play(*__UpperCAmelCase )
        self.play(FadeOut(__UpperCAmelCase ) )
        # Step 2 caption: checkpoint is garbage-collected from memory.
        lowerCAmelCase__ : int = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__UpperCAmelCase , run_time=3 ) )
        self.play(
            FadeOut(__UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase ) , )
        self.wait()
678
0
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple

import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask

import transformers
from transformers import (
    AutoConfig,
    AutoModelForTokenClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we feed the model for training and eval."""

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    """Fine-tune a token-classification model (NER/POS/...) and optionally evaluate/predict."""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # The task implementation (e.g. a NER subclass) lives in a local `tasks` module.
    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer.
    # The .from_pretrained methods guarantee that only one local process can
    # concurrently download model & vocab in distributed training.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions, label_ids) -> Tuple[List[int], List[int]]:
        # Convert logits to label strings, skipping positions masked with the
        # CrossEntropyLoss ignore index (sub-word / special tokens).
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]

        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator: pad to multiples of 8 for fp16 tensor-core efficiency.
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
64
import collections import os import re from pathlib import Path lowerCAmelCase_ = """src/transformers""" # Matches is_xxx_available() lowerCAmelCase_ = re.compile(R"""is\_([a-z_]*)_available()""") # Catches a one-line _import_struct = {xxx} lowerCAmelCase_ = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] lowerCAmelCase_ = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""") # Catches a line if not is_foo_available lowerCAmelCase_ = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""") # Catches a line _import_struct["bla"].append("foo") lowerCAmelCase_ = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] lowerCAmelCase_ = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""") # Catches a line with an object between quotes and a comma: "MyModel", lowerCAmelCase_ = re.compile(R"""^\s+\"([^\"]+)\",""") # Catches a line with objects between brackets only: ["foo", "bar"], lowerCAmelCase_ = re.compile(R"""^\s+\[([^\]]+)\]""") # Catches a line with from foo import bar, bla, boo lowerCAmelCase_ = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") # Catches a line with try: lowerCAmelCase_ = re.compile(R"""^\s*try:""") # Catches a line with else: lowerCAmelCase_ = re.compile(R"""^\s*else:""") def __lowerCAmelCase ( UpperCamelCase ) -> int: if _re_test_backend.search(UpperCamelCase ) is None: return None lowerCAmelCase__ : int = [b[0] for b in _re_backend.findall(UpperCamelCase )] backends.sort() return "_and_".join(UpperCamelCase ) def __lowerCAmelCase ( UpperCamelCase ) -> Any: with open(UpperCamelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: lowerCAmelCase__ : Union[str, Any] = f.readlines() lowerCAmelCase__ : Tuple = 0 while line_index < len(UpperCamelCase ) and not lines[line_index].startswith('''_import_structure = 
{''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(UpperCamelCase ): return None # First grab the objects without a specific backend in _import_structure lowerCAmelCase__ : List[str] = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: lowerCAmelCase__ : str = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(UpperCamelCase ): lowerCAmelCase__ : str = _re_one_line_import_struct.search(UpperCamelCase ).groups()[0] lowerCAmelCase__ : Optional[Any] = re.findall(R'''\[([^\]]+)\]''' , UpperCamelCase ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue lowerCAmelCase__ : Tuple = _re_import_struct_key_value.search(UpperCamelCase ) if single_line_import_search is not None: lowerCAmelCase__ : Optional[Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(UpperCamelCase ) > 0] objects.extend(UpperCamelCase ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 lowerCAmelCase__ : Any = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
lowerCAmelCase__ : List[str] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: lowerCAmelCase__ : Union[str, Any] = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 lowerCAmelCase__ : Optional[int] = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): lowerCAmelCase__ : str = lines[line_index] if _re_import_struct_add_one.search(UpperCamelCase ) is not None: objects.append(_re_import_struct_add_one.search(UpperCamelCase ).groups()[0] ) elif _re_import_struct_add_many.search(UpperCamelCase ) is not None: lowerCAmelCase__ : Optional[int] = _re_import_struct_add_many.search(UpperCamelCase ).groups()[0].split(''', ''' ) lowerCAmelCase__ : List[Any] = [obj[1:-1] for obj in imports if len(UpperCamelCase ) > 0] objects.extend(UpperCamelCase ) elif _re_between_brackets.search(UpperCamelCase ) is not None: lowerCAmelCase__ : List[Any] = _re_between_brackets.search(UpperCamelCase ).groups()[0].split(''', ''' ) lowerCAmelCase__ : Optional[int] = [obj[1:-1] for obj in imports if len(UpperCamelCase ) > 0] objects.extend(UpperCamelCase ) elif _re_quote_object.search(UpperCamelCase ) is not None: objects.append(_re_quote_object.search(UpperCamelCase ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 12 + '''"''' ): objects.append(line[13:-3] ) line_index += 1 lowerCAmelCase__ : Optional[Any] = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend lowerCAmelCase__ : Any = [] while ( line_index < len(UpperCamelCase ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): lowerCAmelCase__ 
: Tuple = lines[line_index] lowerCAmelCase__ : List[Any] = _re_import.search(UpperCamelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 lowerCAmelCase__ : Dict = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(UpperCamelCase ): # If the line is an if is_backend_available, we grab all objects associated. lowerCAmelCase__ : str = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: lowerCAmelCase__ : Tuple = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 lowerCAmelCase__ : str = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): lowerCAmelCase__ : Any = lines[line_index] lowerCAmelCase__ : Union[str, Any] = _re_import.search(UpperCamelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 12 ): objects.append(line[12:-2] ) line_index += 1 lowerCAmelCase__ : str = objects else: line_index += 1 return import_dict_objects, type_hint_objects def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> List[Any]: def find_duplicates(UpperCamelCase ): return [k for k, v in collections.Counter(UpperCamelCase ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] lowerCAmelCase__ : Optional[Any] = [] for key in import_dict_objects.keys(): lowerCAmelCase__ : str = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F"""Duplicate _import_structure 
definitions for: {duplicate_imports}""" ) lowerCAmelCase__ : int = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): lowerCAmelCase__ : List[Any] = '''base imports''' if key == '''none''' else F"""{key} backend""" errors.append(F"""Differences for {name}:""" ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" ) return errors def __lowerCAmelCase ( ) -> Optional[Any]: lowerCAmelCase__ : Dict = [] for root, _, files in os.walk(UpperCamelCase ): if "__init__.py" in files: lowerCAmelCase__ : Any = os.path.join(UpperCamelCase , '''__init__.py''' ) lowerCAmelCase__ : List[Any] = parse_init(UpperCamelCase ) if objects is not None: lowerCAmelCase__ : Optional[int] = analyze_results(*UpperCamelCase ) if len(UpperCamelCase ) > 0: lowerCAmelCase__ : Tuple = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}""" failures.append('''\n'''.join(UpperCamelCase ) ) if len(UpperCamelCase ) > 0: raise ValueError('''\n\n'''.join(UpperCamelCase ) ) def __lowerCAmelCase ( ) -> Tuple: lowerCAmelCase__ : str = [] for path, directories, files in os.walk(UpperCamelCase ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(UpperCamelCase ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(UpperCamelCase ) / folder).glob('''*.py''' ) ) ) == 0: continue lowerCAmelCase__ : Tuple = str((Path(UpperCamelCase ) / folder).relative_to(UpperCamelCase ) ) lowerCAmelCase__ : Dict = short_path.replace(os.path.sep , '''.''' ) submodules.append(UpperCamelCase ) for fname in 
files: if fname == "__init__.py": continue lowerCAmelCase__ : Union[str, Any] = str((Path(UpperCamelCase ) / fname).relative_to(UpperCamelCase ) ) lowerCAmelCase__ : Tuple = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(UpperCamelCase ) return submodules lowerCAmelCase_ = [ """convert_pytorch_checkpoint_to_tf2""", """modeling_flax_pytorch_utils""", """models.esm.openfold_utils""", ] def __lowerCAmelCase ( ) -> List[str]: # This is to make sure the transformers module imported is the one in the repo. from transformers.utils import direct_transformers_import lowerCAmelCase__ : Dict = direct_transformers_import(UpperCamelCase ) lowerCAmelCase__ : int = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentiall re-) add them. with open(os.path.join(UpperCamelCase , '''__init__.py''' ) , '''r''' ) as f: lowerCAmelCase__ : str = f.read() import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''' , UpperCamelCase ) ) ) lowerCAmelCase__ : Optional[int] = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(UpperCamelCase ) > 0: lowerCAmelCase__ : List[Any] = '''\n'''.join(F"""- {module}""" for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registed in the main init of Transformers:\n''' F"""{list_of_modules}\n""" '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
678
0
"""simple docstring""" import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse("1.0.0a"): raise Exception("requires fairseq >= 1.0.0a") logging.set_verbosity_info() A : Optional[Any] = logging.get_logger(__name__) A : Optional[int] = "Hello world! cécé herlolip" def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = FairseqRobertaModel.from_pretrained(_UpperCamelCase ) roberta.eval() # disable dropout __lowerCAmelCase = roberta.model.encoder.sentence_encoder __lowerCAmelCase = XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , ) if classification_head: __lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0] print("Our RoBERTa config:" , _UpperCamelCase ) __lowerCAmelCase = XLMRobertaXLForSequenceClassification(_UpperCamelCase ) if classification_head else XLMRobertaXLForMaskedLM(_UpperCamelCase ) model.eval() # Now let's copy all the weights. 
# Embeddings __lowerCAmelCase = roberta_sent_encoder.embed_tokens.weight __lowerCAmelCase = roberta_sent_encoder.embed_positions.weight __lowerCAmelCase = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. __lowerCAmelCase = roberta_sent_encoder.layer_norm.weight __lowerCAmelCase = roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer __lowerCAmelCase = model.roberta.encoder.layer[i] __lowerCAmelCase = roberta_sent_encoder.layers[i] __lowerCAmelCase = layer.attention __lowerCAmelCase = roberta_layer.self_attn_layer_norm.weight __lowerCAmelCase = roberta_layer.self_attn_layer_norm.bias # self attention __lowerCAmelCase = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) __lowerCAmelCase = roberta_layer.self_attn.q_proj.weight __lowerCAmelCase = roberta_layer.self_attn.q_proj.bias __lowerCAmelCase = roberta_layer.self_attn.k_proj.weight __lowerCAmelCase = roberta_layer.self_attn.k_proj.bias __lowerCAmelCase = roberta_layer.self_attn.v_proj.weight __lowerCAmelCase = roberta_layer.self_attn.v_proj.bias # self-attention output __lowerCAmelCase = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape __lowerCAmelCase = roberta_layer.self_attn.out_proj.weight __lowerCAmelCase = roberta_layer.self_attn.out_proj.bias # this one is final layer norm __lowerCAmelCase = roberta_layer.final_layer_norm.weight __lowerCAmelCase = roberta_layer.final_layer_norm.bias # intermediate __lowerCAmelCase = layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape __lowerCAmelCase = roberta_layer.fca.weight __lowerCAmelCase = roberta_layer.fca.bias # output __lowerCAmelCase = layer.output 
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape __lowerCAmelCase = roberta_layer.fca.weight __lowerCAmelCase = roberta_layer.fca.bias # end of layer if classification_head: __lowerCAmelCase = roberta.model.classification_heads['''mnli'''].dense.weight __lowerCAmelCase = roberta.model.classification_heads['''mnli'''].dense.bias __lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.weight __lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.bias else: # LM Head __lowerCAmelCase = roberta.model.encoder.lm_head.dense.weight __lowerCAmelCase = roberta.model.encoder.lm_head.dense.bias __lowerCAmelCase = roberta.model.encoder.lm_head.layer_norm.weight __lowerCAmelCase = roberta.model.encoder.lm_head.layer_norm.bias __lowerCAmelCase = roberta.model.encoder.lm_head.weight __lowerCAmelCase = roberta.model.encoder.lm_head.bias # Let's check that we get the same results. __lowerCAmelCase = roberta.encode(_UpperCamelCase ).unsqueeze(0 ) # batch of size 1 __lowerCAmelCase = model(_UpperCamelCase )[0] if classification_head: __lowerCAmelCase = roberta.model.classification_heads['''mnli'''](roberta.extract_features(_UpperCamelCase ) ) else: __lowerCAmelCase = roberta.model(_UpperCamelCase )[0] print(our_output.shape , their_output.shape ) __lowerCAmelCase = torch.max(torch.abs(our_output - their_output ) ).item() print(f"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7 __lowerCAmelCase = torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) print("Do both models output the same tensors?" 
, "🔥" if success else "💩" ) if not success: raise Exception("Something went wRoNg" ) pathlib.Path(_UpperCamelCase ).mkdir(parents=_UpperCamelCase , exist_ok=_UpperCamelCase ) print(f"Saving model to {pytorch_dump_folder_path}" ) model.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": A : str = argparse.ArgumentParser() # Required parameters parser.add_argument( "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--classification_head", action="store_true", help="Whether to convert a final classification head." ) A : Optional[Any] = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
636
import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class _lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=64 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ): lowerCAmelCase__ : Optional[int] = parent lowerCAmelCase__ : Tuple = batch_size lowerCAmelCase__ : Union[str, Any] = seq_length lowerCAmelCase__ : str = is_training lowerCAmelCase__ : Union[str, Any] = use_input_mask lowerCAmelCase__ : List[Any] = use_token_type_ids lowerCAmelCase__ : int = use_labels lowerCAmelCase__ : List[Any] = vocab_size lowerCAmelCase__ : Optional[int] = hidden_size lowerCAmelCase__ : List[str] = embedding_size lowerCAmelCase__ : Optional[int] = num_hidden_layers lowerCAmelCase__ : Optional[int] = num_attention_heads lowerCAmelCase__ : List[str] = intermediate_size 
lowerCAmelCase__ : Tuple = hidden_act lowerCAmelCase__ : Union[str, Any] = hidden_dropout_prob lowerCAmelCase__ : Optional[int] = attention_probs_dropout_prob lowerCAmelCase__ : Union[str, Any] = max_position_embeddings lowerCAmelCase__ : List[Any] = type_vocab_size lowerCAmelCase__ : Optional[Any] = type_sequence_label_size lowerCAmelCase__ : List[Any] = initializer_range lowerCAmelCase__ : Optional[Any] = num_labels lowerCAmelCase__ : List[str] = num_choices lowerCAmelCase__ : Any = scope def __magic_name__( self ): lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase__ : str = None if self.use_input_mask: lowerCAmelCase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase__ : Optional[Any] = None if self.use_token_type_ids: lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase__ : Dict = None lowerCAmelCase__ : Dict = None lowerCAmelCase__ : Optional[int] = None if self.use_labels: lowerCAmelCase__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase__ : Union[str, Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __magic_name__( self ): return MegatronBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , 
type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Union[str, Any] = MegatronBertModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : List[Any] = MegatronBertForMaskedLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Optional[Any] = MegatronBertForCausalLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __magic_name__( self , 
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : str = MegatronBertForNextSentencePrediction(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Dict = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : str = MegatronBertForPreTraining(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : int = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , next_sentence_label=__UpperCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : str = MegatronBertForQuestionAnswering(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Union[str, Any] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : 
Optional[Any] = self.num_labels lowerCAmelCase__ : Union[str, Any] = MegatronBertForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Optional[Any] = self.num_labels lowerCAmelCase__ : str = MegatronBertForTokenClassification(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Optional[Any] = self.num_choices lowerCAmelCase__ : Dict = MegatronBertForMultipleChoice(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase__ : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase__ : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase__ : Any = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __magic_name__( self ): lowerCAmelCase__ : Union[str, Any] = self.prepare_config_and_inputs() ( ( lowerCAmelCase__ ) , ( 
lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ) : Optional[int] = config_and_inputs lowerCAmelCase__ : int = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class _lowerCAmelCase ( _lowercase , _lowercase , unittest.TestCase ): A__ = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if is_torch_available() else () ) A__ = ( { 'feature-extraction': MegatronBertModel, 'fill-mask': MegatronBertForMaskedLM, 'question-answering': MegatronBertForQuestionAnswering, 'text-classification': MegatronBertForSequenceClassification, 'text-generation': MegatronBertForCausalLM, 'token-classification': MegatronBertForTokenClassification, 'zero-shot': MegatronBertForSequenceClassification, } if is_torch_available() else {} ) A__ = True # test_resize_embeddings = False A__ = False def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ): lowerCAmelCase__ : List[Any] = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase ) if return_labels: if model_class in get_values(__UpperCAmelCase ): lowerCAmelCase__ : Optional[int] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase ) return inputs_dict def __magic_name__( self ): lowerCAmelCase__ : str = MegatronBertModelTester(self ) lowerCAmelCase__ : Dict = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def __magic_name__( self ): 
self.config_tester.run_common_tests() def __magic_name__( self ): lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_question_answering(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*__UpperCAmelCase ) def __lowerCAmelCase ( UpperCamelCase ) -> Optional[int]: return torch.tensor( UpperCamelCase , dtype=torch.long , device=UpperCamelCase , ) lowerCAmelCase_ = 1e-4 @require_torch @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( unittest.TestCase ): @slow @unittest.skip('''Model is not available.''' ) def __magic_name__( self ): lowerCAmelCase__ : int = '''nvidia/megatron-bert-uncased-345m''' if "MYDIR" in 
os.environ: lowerCAmelCase__ : Union[str, Any] = os.path.join(os.environ['''MYDIR'''] , __UpperCAmelCase ) lowerCAmelCase__ : Tuple = MegatronBertModel.from_pretrained(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.half() lowerCAmelCase__ : Optional[int] = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] ) with torch.no_grad(): lowerCAmelCase__ : List[Any] = model(__UpperCAmelCase )[0] lowerCAmelCase__ : List[Any] = torch.Size((1, 9, 1024) ) self.assertEqual(output.shape , __UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728] for ii in range(3 ): for jj in range(3 ): lowerCAmelCase__ : Union[str, Any] = output[0, ii, jj] lowerCAmelCase__ : Optional[Any] = expected[3 * ii + jj] lowerCAmelCase__ : List[str] = '''ii={} jj={} a={} b={}'''.format(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) self.assertTrue(math.isclose(__UpperCAmelCase , __UpperCAmelCase , rel_tol=__UpperCAmelCase , abs_tol=__UpperCAmelCase ) , msg=__UpperCAmelCase )
678
0
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase : str = logging.get_logger(__name__) _lowercase : Any = { 'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json', 'Salesforce/blip-vqa-capfit-large': ( 'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json' ), 'Salesforce/blip-image-captioning-base': ( 'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json' ), 'Salesforce/blip-image-captioning-large': ( 'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json' ), 'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json', 'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json', 'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json', 'Salesforce/blip-itm-large-flikr': ( 'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json' ), } class _UpperCAmelCase ( _lowercase ): a__ : List[Any] = "blip_text_model" def __init__( self : List[Any] , _lowercase : List[str]=3_05_24 , _lowercase : Optional[int]=7_68 , _lowercase : int=7_68 , _lowercase : List[str]=30_72 , _lowercase : Dict=7_68 , _lowercase : str=12 , _lowercase : Optional[Any]=8 , _lowercase : str=5_12 , _lowercase : List[str]="gelu" , _lowercase : Optional[Any]=1E-12 , _lowercase : Optional[Any]=0.0 , _lowercase : List[str]=0.0 , _lowercase : int=0.02 , _lowercase : Tuple=3_05_22 , _lowercase : Optional[int]=2 , _lowercase : str=0 , _lowercase : str=1_02 , _lowercase : Tuple=True , _lowercase : Optional[int]=True , **_lowercase : Any , ): super().__init__( pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , 
sep_token_id=__UpperCAmelCase , **__UpperCAmelCase , ) __UpperCAmelCase = vocab_size __UpperCAmelCase = hidden_size __UpperCAmelCase = encoder_hidden_size __UpperCAmelCase = intermediate_size __UpperCAmelCase = projection_dim __UpperCAmelCase = hidden_dropout_prob __UpperCAmelCase = num_hidden_layers __UpperCAmelCase = num_attention_heads __UpperCAmelCase = max_position_embeddings __UpperCAmelCase = layer_norm_eps __UpperCAmelCase = hidden_act __UpperCAmelCase = initializer_range __UpperCAmelCase = attention_probs_dropout_prob __UpperCAmelCase = is_decoder __UpperCAmelCase = use_cache @classmethod def a ( cls : Dict , _lowercase : Dict , **_lowercase : Union[str, Any] ): cls._set_token_in_kwargs(__UpperCAmelCase ) __UpperCAmelCase = cls.get_config_dict(__UpperCAmelCase , **__UpperCAmelCase ) # get the text config dict if we are loading from BlipConfig if config_dict.get('''model_type''' ) == "blip": __UpperCAmelCase = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__UpperCAmelCase , **__UpperCAmelCase ) class _UpperCAmelCase ( _lowercase ): a__ : str = "blip_vision_model" def __init__( self : str , _lowercase : Any=7_68 , _lowercase : int=30_72 , _lowercase : Union[str, Any]=5_12 , _lowercase : int=12 , _lowercase : List[str]=12 , _lowercase : Tuple=3_84 , _lowercase : Dict=16 , _lowercase : Any="gelu" , _lowercase : Union[str, Any]=1E-5 , _lowercase : Union[str, Any]=0.0 , _lowercase : Tuple=1E-10 , **_lowercase : Optional[Any] , ): super().__init__(**__UpperCAmelCase ) __UpperCAmelCase = hidden_size __UpperCAmelCase = intermediate_size __UpperCAmelCase = projection_dim __UpperCAmelCase = num_hidden_layers __UpperCAmelCase = num_attention_heads __UpperCAmelCase = patch_size __UpperCAmelCase = image_size __UpperCAmelCase = initializer_range __UpperCAmelCase = attention_dropout __UpperCAmelCase = layer_norm_eps __UpperCAmelCase = hidden_act @classmethod def a ( cls : Any , _lowercase : Optional[int] , **_lowercase : Dict ): cls._set_token_in_kwargs(__UpperCAmelCase ) __UpperCAmelCase = cls.get_config_dict(__UpperCAmelCase , **__UpperCAmelCase ) # get the vision config dict if we are loading from BlipConfig if config_dict.get('''model_type''' ) == "blip": __UpperCAmelCase = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__UpperCAmelCase , **__UpperCAmelCase ) class _UpperCAmelCase ( _lowercase ): a__ : Optional[Any] = "blip" a__ : Dict = True def __init__( self : Any , _lowercase : int=None , _lowercase : Tuple=None , _lowercase : Union[str, Any]=5_12 , _lowercase : Optional[int]=2.6_592 , _lowercase : Optional[Any]=2_56 , **_lowercase : List[str] , ): super().__init__(**__UpperCAmelCase ) if text_config is None: __UpperCAmelCase = {} logger.info('''`text_config` is `None`. Initializing the `BlipTextConfig` with default values.''' ) if vision_config is None: __UpperCAmelCase = {} logger.info('''`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.''' ) __UpperCAmelCase = BlipTextConfig(**__UpperCAmelCase ) __UpperCAmelCase = BlipVisionConfig(**__UpperCAmelCase ) __UpperCAmelCase = self.vision_config.hidden_size __UpperCAmelCase = projection_dim __UpperCAmelCase = logit_scale_init_value __UpperCAmelCase = 1.0 __UpperCAmelCase = 0.02 __UpperCAmelCase = image_text_hidden_size @classmethod def a ( cls : str , _lowercase : Tuple , _lowercase : str , **_lowercase : Any ): return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__UpperCAmelCase ) def a ( self : Any ): __UpperCAmelCase = copy.deepcopy(self.__dict__ ) __UpperCAmelCase = self.text_config.to_dict() __UpperCAmelCase = self.vision_config.to_dict() __UpperCAmelCase = self.__class__.model_type return output
49
import warnings from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""", # See all BART models at https://huggingface.co/models?filter=bart } class _lowerCAmelCase ( _lowercase ): A__ = 'bart' A__ = ['past_key_values'] A__ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self , __UpperCAmelCase=5_0265 , __UpperCAmelCase=1024 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase="gelu" , __UpperCAmelCase=1024 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=0.0 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=3 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=2 , **__UpperCAmelCase , ): lowerCAmelCase__ : Union[str, Any] = vocab_size lowerCAmelCase__ : Optional[Any] = max_position_embeddings lowerCAmelCase__ : int = d_model lowerCAmelCase__ : str = encoder_ffn_dim lowerCAmelCase__ : Any = encoder_layers lowerCAmelCase__ : Dict = encoder_attention_heads lowerCAmelCase__ : Union[str, Any] = decoder_ffn_dim lowerCAmelCase__ : Union[str, Any] = decoder_layers lowerCAmelCase__ : Any = decoder_attention_heads lowerCAmelCase__ : Tuple = dropout lowerCAmelCase__ : Any = attention_dropout lowerCAmelCase__ : Any = activation_dropout lowerCAmelCase__ : Optional[Any] 
= activation_function lowerCAmelCase__ : Union[str, Any] = init_std lowerCAmelCase__ : Union[str, Any] = encoder_layerdrop lowerCAmelCase__ : int = decoder_layerdrop lowerCAmelCase__ : Optional[int] = classifier_dropout lowerCAmelCase__ : str = use_cache lowerCAmelCase__ : int = encoder_layers lowerCAmelCase__ : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( num_labels=__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , forced_eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , ) # ensure backward compatibility for BART CNN models if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , __UpperCAmelCase ): lowerCAmelCase__ : str = self.bos_token_id warnings.warn( f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """ '''The config can simply be saved and uploaded again to be fixed.''' ) class _lowerCAmelCase ( _lowercase ): @property def __magic_name__( self ): if self.task in ["default", "seq2seq-lm"]: lowerCAmelCase__ : Tuple = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: lowerCAmelCase__ : Any = {0: '''batch'''} lowerCAmelCase__ : Tuple = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: lowerCAmelCase__ : Optional[int] = {0: '''batch''', 1: '''decoder_sequence'''} lowerCAmelCase__ : Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. 
lowerCAmelCase__ : List[str] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.num_layers for i in range(__UpperCAmelCase ): lowerCAmelCase__ : int = {0: '''batch''', 2: '''past_sequence + sequence'''} lowerCAmelCase__ : Union[str, Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} else: lowerCAmelCase__ : List[str] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property def __magic_name__( self ): if self.task in ["default", "seq2seq-lm"]: lowerCAmelCase__ : Dict = super().outputs else: lowerCAmelCase__ : Any = super(__UpperCAmelCase , self ).outputs if self.use_past: lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.num_layers for i in range(__UpperCAmelCase ): lowerCAmelCase__ : List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} lowerCAmelCase__ : Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): lowerCAmelCase__ : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # Generate decoder inputs lowerCAmelCase__ : int = seq_length if not self.use_past else 1 lowerCAmelCase__ : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : 
Optional[int] = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()} lowerCAmelCase__ : str = dict(**__UpperCAmelCase , **__UpperCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = common_inputs['''input_ids'''].shape lowerCAmelCase__ : List[str] = common_inputs['''decoder_input_ids'''].shape[1] lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.num_attention_heads lowerCAmelCase__ : Any = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCAmelCase__ : List[Any] = decoder_seq_length + 3 lowerCAmelCase__ : Any = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) lowerCAmelCase__ : Any = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase )] , dim=1 ) lowerCAmelCase__ : List[str] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered lowerCAmelCase__ , lowerCAmelCase__ : Any = self.num_layers lowerCAmelCase__ : Union[str, Any] = min(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Any = max(__UpperCAmelCase , __UpperCAmelCase ) - min_num_layers lowerCAmelCase__ : Dict = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(__UpperCAmelCase ): common_inputs["past_key_values"].append( ( torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), ) ) # TODO: test this. 
lowerCAmelCase__ : Any = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(__UpperCAmelCase , __UpperCAmelCase ): common_inputs["past_key_values"].append((torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) ) return common_inputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): lowerCAmelCase__ : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowerCAmelCase__ , lowerCAmelCase__ : Any = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values lowerCAmelCase__ : List[str] = seqlen + 2 lowerCAmelCase__ , lowerCAmelCase__ : int = self.num_layers lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.num_attention_heads lowerCAmelCase__ : Optional[Any] = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCAmelCase__ : Optional[Any] = common_inputs['''attention_mask'''].dtype lowerCAmelCase__ : List[Any] = torch.cat( [common_inputs['''attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 ) lowerCAmelCase__ : List[Any] = [ (torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(__UpperCAmelCase ) ] return common_inputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX lowerCAmelCase__ : Tuple = compute_effective_axis_dimension( __UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowerCAmelCase__ : Optional[Any] = tokenizer.num_special_tokens_to_add(__UpperCAmelCase ) lowerCAmelCase__ : int = compute_effective_axis_dimension( __UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCAmelCase ) # Generate dummy inputs according to compute batch and sequence lowerCAmelCase__ : Optional[int] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size lowerCAmelCase__ : str = dict(tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase ) ) return common_inputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): if self.task in ["default", "seq2seq-lm"]: lowerCAmelCase__ : Union[str, Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) elif self.task == "causal-lm": lowerCAmelCase__ : List[str] = self._generate_dummy_inputs_for_causal_lm( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) else: lowerCAmelCase__ : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) return common_inputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): if self.task in ["default", "seq2seq-lm"]: lowerCAmelCase__ 
: Any = super()._flatten_past_key_values_(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) else: lowerCAmelCase__ : Dict = super(__UpperCAmelCase , self )._flatten_past_key_values_( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
678
0
'''simple docstring''' import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate _a : List[str] = trt.Logger(trt.Logger.WARNING) _a : List[Any] = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) _a : Union[str, Any] = logging.getLogger(__name__) _a : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--onnx_model_path', default=None, type=str, required=True, help='Path to ONNX model: ', ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='The output directory where the model checkpoints and predictions will be written.', ) # Other parameters parser.add_argument( '--tokenizer_name', default='', type=str, required=True, help='Pretrained tokenizer name or path if not the same as model_name', ) parser.add_argument( '--version_2_with_negative', action='store_true', help='If true, the SQuAD examples contain some that do not have an answer.', ) parser.add_argument( '--null_score_diff_threshold', type=float, default=0.0, help='If null_score - best_non_null is greater than the threshold predict null.', ) parser.add_argument( '--max_seq_length', default=384, type=int, help=( 'The maximum total input sequence length after WordPiece tokenization. Sequences ' 'longer than this will be truncated, and sequences shorter than this will be padded.' 
), ) parser.add_argument( '--doc_stride', default=128, type=int, help='When splitting up a long document into chunks, how much stride to take between chunks.', ) parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.') parser.add_argument( '--n_best_size', default=20, type=int, help='The total number of n-best predictions to generate in the nbest_predictions.json output file.', ) parser.add_argument( '--max_answer_length', default=30, type=int, help=( 'The maximum length of an answer that can be generated. This is needed because the start ' 'and end predictions are not conditioned on one another.' ), ) parser.add_argument('--seed', type=int, default=42, help='random seed for initialization') parser.add_argument( '--dataset_name', type=str, default=None, required=True, help='The name of the dataset to use (via the datasets library).', ) parser.add_argument( '--dataset_config_name', type=str, default=None, help='The configuration name of the dataset to use (via the datasets library).', ) parser.add_argument( '--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.' ) parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets') parser.add_argument( '--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision instead of 32-bit', ) parser.add_argument( '--int8', action='store_true', help='Whether to use INT8', ) _a : Any = parser.parse_args() if args.tokenizer_name: _a : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( 'You are instantiating a new tokenizer from scratch. This is not supported by this script.' 'You can do it from another script, save it, and load it from here, using --tokenizer_name.' 
) logger.info('Training/evaluation parameters %s', args) _a : List[str] = args.per_device_eval_batch_size _a : Any = (args.eval_batch_size, args.max_seq_length) # TRT Engine properties _a : Optional[int] = True _a : str = 'temp_engine/bert-fp32.engine' if args.fpaa: _a : List[str] = 'temp_engine/bert-fp16.engine' if args.inta: _a : Optional[int] = 'temp_engine/bert-int8.engine' # import ONNX file if not os.path.exists('temp_engine'): os.makedirs('temp_engine') _a : Optional[Any] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, 'rb') as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network _a : int = [network.get_input(i) for i in range(network.num_inputs)] _a : List[str] = [_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: _a : str = 1 << 50 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fpaa: config.set_flag(trt.BuilderFlag.FPaa) if args.inta: config.set_flag(trt.BuilderFlag.INTa) _a : Any = builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) _a : List[Any] = builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, 'wb') as f: f.write(engine.serialize()) def lowerCamelCase__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : str ): 
UpperCAmelCase = np.asarray(inputs['input_ids'] , dtype=np.intaa ) UpperCAmelCase = np.asarray(inputs['attention_mask'] , dtype=np.intaa ) UpperCAmelCase = np.asarray(inputs['token_type_ids'] , dtype=np.intaa ) # Copy inputs cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , SCREAMING_SNAKE_CASE ) cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , SCREAMING_SNAKE_CASE ) cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , SCREAMING_SNAKE_CASE ) # start time UpperCAmelCase = time.time() # Run inference context.execute_async( bindings=[int(SCREAMING_SNAKE_CASE ) for d_inp in d_inputs] + [int(SCREAMING_SNAKE_CASE ), int(SCREAMING_SNAKE_CASE )] , stream_handle=stream.handle ) # Transfer predictions back from GPU cuda.memcpy_dtoh_async(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) cuda.memcpy_dtoh_async(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Synchronize the stream and take time stream.synchronize() # end time UpperCAmelCase = time.time() UpperCAmelCase = end_time - start_time UpperCAmelCase = (h_outputa, h_outputa) # print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. _a : Any = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. 
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if args.dataset_name is not None: # Downloading and loading a dataset from the hub. _a : str = load_dataset(args.dataset_name, args.dataset_config_name) else: raise ValueError('Evaluation requires a dataset name') # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. _a : str = raw_datasets['validation'].column_names _a : List[str] = 'question' if 'question' in column_names else column_names[0] _a : Any = 'context' if 'context' in column_names else column_names[1] _a : Dict = 'answers' if 'answers' in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). _a : List[str] = tokenizer.padding_side == 'right' if args.max_seq_length > tokenizer.model_max_length: logger.warning( F'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the''' F'''model ({tokenizer.model_max_length}). 
Using max_seq_length={tokenizer.model_max_length}.''' ) _a : int = min(args.max_seq_length, tokenizer.model_max_length) def lowerCamelCase__ ( SCREAMING_SNAKE_CASE : Union[str, Any] ): # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). So we remove that # left whitespace UpperCAmelCase = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. UpperCAmelCase = tokenizer( examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='only_second' if pad_on_right else 'only_first' , max_length=SCREAMING_SNAKE_CASE , stride=args.doc_stride , return_overflowing_tokens=SCREAMING_SNAKE_CASE , return_offsets_mapping=SCREAMING_SNAKE_CASE , padding='max_length' , ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. UpperCAmelCase = tokenized_examples.pop('overflow_to_sample_mapping' ) # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. UpperCAmelCase = [] for i in range(len(tokenized_examples['input_ids'] ) ): # Grab the sequence corresponding to that example (to know what is the context and what is the question). UpperCAmelCase = tokenized_examples.sequence_ids(SCREAMING_SNAKE_CASE ) UpperCAmelCase = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. 
UpperCAmelCase = sample_mapping[i] tokenized_examples["example_id"].append(examples['id'][sample_index] ) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. UpperCAmelCase = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples['offset_mapping'][i] ) ] return tokenized_examples _a : str = raw_datasets['validation'] # Validation Feature Creation _a : Any = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc='Running tokenizer on validation dataset', ) _a : Optional[Any] = default_data_collator _a : Optional[Any] = eval_dataset.remove_columns(['example_id', 'offset_mapping']) _a : Tuple = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def lowerCamelCase__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : str="eval" ): # Post-processing: we match the start logits and end logits to answers in the original context. UpperCAmelCase = postprocess_qa_predictions( examples=SCREAMING_SNAKE_CASE , features=SCREAMING_SNAKE_CASE , predictions=SCREAMING_SNAKE_CASE , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=SCREAMING_SNAKE_CASE , ) # Format the result to the format the metric expects. 
if args.version_2_with_negative: UpperCAmelCase = [ {'''id''': k, '''prediction_text''': v, '''no_answer_probability''': 0.0} for k, v in predictions.items() ] else: UpperCAmelCase = [{'''id''': k, '''prediction_text''': v} for k, v in predictions.items()] UpperCAmelCase = [{'''id''': ex['''id'''], '''answers''': ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=SCREAMING_SNAKE_CASE , label_ids=SCREAMING_SNAKE_CASE ) _a : List[str] = load_metric('squad_v2' if args.version_2_with_negative else 'squad') # Evaluation! logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path) with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( f.read() ) as engine, engine.create_execution_context() as context: # setup for TRT inferrence for i in range(len(input_names)): context.set_binding_shape(i, INPUT_SHAPE) assert context.all_binding_shapes_specified def lowerCamelCase__ ( SCREAMING_SNAKE_CASE : int ): return trt.volume(engine.get_binding_shape(SCREAMING_SNAKE_CASE ) ) * engine.get_binding_dtype(SCREAMING_SNAKE_CASE ).itemsize # Allocate device memory for inputs and outputs. _a : Optional[int] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] # Allocate output buffer _a : str = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa) _a : Any = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa) _a : str = cuda.mem_alloc(h_outputa.nbytes) _a : Dict = cuda.mem_alloc(h_outputa.nbytes) # Create a stream in which to copy inputs/outputs and run inference. 
_a : Optional[Any] = cuda.Stream() # Evaluation logger.info('***** Running Evaluation *****') logger.info(F''' Num examples = {len(eval_dataset)}''') logger.info(F''' Batch size = {args.per_device_eval_batch_size}''') _a : Union[str, Any] = 0.0 _a : List[str] = 0 _a : Any = timeit.default_timer() _a : int = None for step, batch in enumerate(eval_dataloader): _a , _a : List[str] = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream) total_time += infer_time niter += 1 _a , _a : List[Any] = outputs _a : Optional[Any] = torch.tensor(start_logits) _a : Union[str, Any] = torch.tensor(end_logits) # necessary to pad predictions and labels for being gathered _a : Optional[Any] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100) _a : List[Any] = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100) _a : str = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) _a : List[Any] = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) if all_preds is not None: _a : Tuple = nested_truncate(all_preds, len(eval_dataset)) _a : str = timeit.default_timer() - start_time logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset)) # Inference time from TRT logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter)) logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000)) logger.info('Total Number of Inference = %d', niter) _a : List[Any] = post_processing_function(eval_examples, eval_dataset, all_preds) _a : Any = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(F'''Evaluation metrics: {eval_metric}''')
447
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class _lowerCAmelCase(PretrainedConfig):
    """Configuration class for a SEW-D (Squeezed and Efficient Wav2Vec with Disentangled attention) model.

    Instantiating this configuration with the defaults yields a configuration similar to
    ``asapp/sew-d-tiny-100k``. The original source declared every ``__init__`` parameter with the
    same placeholder name (a SyntaxError) and inherited from an undefined ``_lowercase``; the
    parameter names below are restored from the attribute assignments in the body, and the base
    class is the imported ``PretrainedConfig``.
    """

    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        # Convolutional feature-extractor geometry; the three lists must stay the same length
        # (validated below).
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        # Product of all conv strides = downsampling factor from raw input samples to logits frames.
        return functools.reduce(operator.mul, self.conv_stride, 1)
678
0
"""Tests for reading and writing datasets with ParquetDatasetReader / ParquetDatasetWriter.

The original source named every test function ``lowerCAmelCase_`` (each definition shadowed the
previous one, so pytest would only ever collect the last) and declared duplicate ``snake_case_``
parameters (a SyntaxError). Names are restored from the fixture usage inside each body.
"""
import pyarrow.parquet as pq
import pytest

from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_parquet_dataset(dataset, expected_features):
    # Shared sanity checks for a Dataset read from the 4-row parquet fixture.
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    # keep_in_memory=True must load into RAM (arrow memory grows); False must memory-map.
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    # No explicit split defaults to "train".
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    # The reader accepts either a single path or a list of paths.
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    # Shared sanity checks for a DatasetDict read from the 4-row parquet fixture.
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        # No explicit split: feed two files and expect both splits back.
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    # The round-tripped arrow table must be identical to the dataset's backing table.
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    # Feature types (here Image) must survive a parquet round trip, both eagerly and streaming.
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    # Media features force a smaller parquet row-group size; plain features use the default (None).
    assert get_writer_batch_size(feature) == expected
78
"""Tests for PegasusTokenizer / PegasusTokenizerFast.

The original source named both test classes ``_lowerCAmelCase`` (the second shadowed the first),
named every method ``__magic_name__`` (only the last per class survived), assigned all class
attributes to ``A__`` (clobbering each other), and passed the undefined name ``__UpperCAmelCase``
to ``PegasusTokenizer`` in ``setUp``. Names are restored from the bodies and the imported-but-
unused ``TokenizerTesterMixin``.
"""
import unittest

from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")


@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs):
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        """Token/id round trip: `</s>` is id 1 in the fixture vocab."""
        token = "</s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        # Slow and fast tokenizers must agree on unknown/mask/special token handling.
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    @slow
    def test_tokenizer_integration(self):
        # Expected encodings for three reference sentences; padded rows are built with list
        # arithmetic instead of the original hundreds of literal zeros/ones (same values).
        expected_encoding = {
            "input_ids": [
                [38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787,
                 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304,
                 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143,
                 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787,
                 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1],
                [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039,
                 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1] + [0] * 54,
                [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1] + [0] * 72,
            ],
            "attention_mask": [
                [1] * 83,
                [1] * 29 + [0] * 54,
                [1] * 11 + [0] * 72,
            ],
        }

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-pegasus-large-arxiv",
            revision="ba85d0851d708441f91440d509690f1ab6353415",
        )


@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs):
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        """Checks ids against values produced by the original TF tokenizer for the same string."""
        test_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )

        token_ids = self._large_tokenizer(test_str).input_ids

        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
678
0
"""Convert original Audio Spectrogram Transformer (AST) checkpoints to the HF format.

The original source named every helper ``_lowerCAmelCase`` while the bodies call the real names
(``get_audio_spectrogram_transformer_config``, ``remove_keys``, ``convert_state_dict``), declared
duplicate ``lowerCAmelCase_`` parameters (a SyntaxError), and read the undefined ``args`` in
``__main__``. Real names are restored from those in-body call sites.
"""
import argparse
import json
from pathlib import Path

import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download

from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_audio_spectrogram_transformer_config(model_name):
    """Build an ASTConfig whose strides/labels match the given original checkpoint name."""
    config = ASTConfig()

    if "10-10" in model_name:
        pass  # default config already uses 10-10 strides
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(name):
    """Map one original checkpoint parameter name onto its HF AST equivalent."""
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")

    return name


def convert_state_dict(orig_state_dict, config):
    """Rename all keys; fused qkv weights/biases are split into separate q/k/v tensors."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def remove_keys(state_dict):
    """Drop the distillation head, which has no HF counterpart."""
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak model's weights to our Audio Spectrogram Transformer structure,
    verify the logits on a sample audio clip, and optionally save/push the result.
    """
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()

    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )

        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16_000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="ast-finetuned-audioset-10-10-0.4593",
        type=str,
        help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
283
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): the original bound both the logger and this URL map to the same
# name, so the logger was shadowed and lost; both get distinct names here.
DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class _lowerCAmelCase(PretrainedConfig):
    """Configuration for a Donut Swin encoder.

    Holds the hyper-parameters of the vision encoder; defaults match the
    naver-clova-ix/donut-base architecture. The original base class name
    (`_lowercase`) was undefined; `PretrainedConfig` is the imported base
    this config pattern requires.
    """

    model_type = "donut-swin"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # The original assigned every value to a throwaway local, so the
        # config object never stored anything; persist them as attributes.
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with
        # VisionEncoderDecoderModel; this is the channel dimension after the
        # last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
678
0
"""simple docstring""" from __future__ import annotations from scipy.special import comb # type: ignore class lowerCamelCase__ : def __init__( self , snake_case ) -> Tuple: """simple docstring""" lowercase : Optional[Any] = list_of_points # Degree determines the flexibility of the curve. # Degree = 1 will produce a straight line. lowercase : Optional[Any] = len(__UpperCAmelCase ) - 1 def _UpperCAmelCase ( self , snake_case ) -> List[Any]: """simple docstring""" assert 0 <= t <= 1, "Time t must be between 0 and 1." lowercase : list[float] = [] for i in range(len(self.list_of_points ) ): # basis function for each i output_values.append( comb(self.degree , __UpperCAmelCase ) * ((1 - t) ** (self.degree - i)) * (t**i) ) # the basis must sum up to 1 for it to produce a valid Bezier curve. assert round(sum(__UpperCAmelCase ) , 5 ) == 1 return output_values def _UpperCAmelCase ( self , snake_case ) -> List[str]: """simple docstring""" assert 0 <= t <= 1, "Time t must be between 0 and 1." lowercase : Union[str, Any] = self.basis_function(__UpperCAmelCase ) lowercase : List[Any] = 0.0 lowercase : Any = 0.0 for i in range(len(self.list_of_points ) ): # For all points, sum up the product of i-th basis function and i-th point. 
x += basis_function[i] * self.list_of_points[i][0] y += basis_function[i] * self.list_of_points[i][1] return (x, y) def _UpperCAmelCase ( self , snake_case = 0.01 ) -> str: """simple docstring""" from matplotlib import pyplot as plt # type: ignore lowercase : list[float] = [] # x coordinates of points to plot lowercase : list[float] = [] # y coordinates of points to plot lowercase : int = 0.0 while t <= 1: lowercase : Tuple = self.bezier_curve_function(__UpperCAmelCase ) to_plot_x.append(value[0] ) to_plot_y.append(value[1] ) t += step_size lowercase : Optional[int] = [i[0] for i in self.list_of_points] lowercase : List[str] = [i[1] for i in self.list_of_points] plt.plot( __UpperCAmelCase , __UpperCAmelCase , color="""blue""" , label="""Curve of Degree """ + str(self.degree ) , ) plt.scatter(__UpperCAmelCase , __UpperCAmelCase , color="""red""" , label="""Control Points""" ) plt.legend() plt.show() if __name__ == "__main__": import doctest doctest.testmod() BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1 BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2 BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
607
# Unit name -> SI symbol; lookups fall through for inputs already given as symbols.
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}

# Exponent of the factor(meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert ``value`` between metric length units.

    Unit names are case-insensitive, may be plural, and may be given either
    as full names ("kilometer") or SI symbols ("km").

    Raises:
        ValueError: if either unit is not a known metric length unit.
    """
    # lower-case, drop a trailing plural 's', then map names to symbols
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    # Both branches of the original if/else reduced to this same difference.
    exponent = from_exponent - to_exponent
    return value * pow(10, exponent)


# Backward-compatible alias for the original (mangled) public name.
__lowerCAmelCase = length_conversion

if __name__ == "__main__":
    from doctest import testmod

    testmod()
678
0
"""simple docstring""" def __lowerCAmelCase ( lowercase : Dict = 10 , lowercase : str = 22 ) -> int: """simple docstring""" snake_case : Tuple = range(1 , lowercase ) snake_case : Tuple = range(1 , lowercase ) return sum( 1 for power in powers for base in bases if len(str(base**power ) ) == power ) if __name__ == "__main__": print(F'''{solution(10, 22) = }''')
178
# NOTE(review): this test module has been machine-mangled: statements are fused
# onto a handful of physical lines, identifiers were rewritten to placeholder
# names, and every test method is defined as `__magic_name__`, so at
# class-creation time each definition overwrites the previous one and only the
# final method survives on the class. The code is kept byte-identical below
# (comments only); de-obfuscating against the upstream transformers test file
# is required before these tests can run. The decorators suggest the methods
# were, in order: a torch small-model test, a TF small-model test, and slow
# large-model tests — TODO confirm against upstream.
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _lowerCAmelCase : @staticmethod def __magic_name__( *__UpperCAmelCase , **__UpperCAmelCase ): pass @is_pipeline_test @require_vision class _lowerCAmelCase ( unittest.TestCase ): @require_torch def __magic_name__( self ): lowerCAmelCase__ : int = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , ) lowerCAmelCase__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase__ : List[str] = image_classifier(__UpperCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. 
# (continuation) PT small-model assertions: every candidate label scores
# ~0.333, so assertIn accepts either label ordering below.
self.assertIn( nested_simplify(__UpperCAmelCase ) , [ [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}], [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}], ] , ) lowerCAmelCase__ : List[Any] = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], ] , ) @require_tf def __magic_name__( self ): lowerCAmelCase__ : List[Any] = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' ) lowerCAmelCase__ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase__ : str = image_classifier(__UpperCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': 
# (continuation) TF small-model + batched expectations (inside a bracketed
# expression, so this comment line is syntactically inert).
'''c'''}] , ) lowerCAmelCase__ : List[Any] = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], ] , ) @slow @require_torch def __magic_name__( self ): lowerCAmelCase__ : str = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , ) # This is an image of 2 cats with remotes and no planes lowerCAmelCase__ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase__ : str = image_classifier(__UpperCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) lowerCAmelCase__ : Tuple = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [ {'''score''': 0.511, '''label''': 
# (continuation) slow large-model expectations (full CLIP): remote 0.511,
# cat 0.485, plane 0.004, for both PT and TF frameworks.
'''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , ) @slow @require_tf def __magic_name__( self ): lowerCAmelCase__ : Union[str, Any] = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' ) # This is an image of 2 cats with remotes and no planes lowerCAmelCase__ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase__ : Union[str, Any] = image_classifier(__UpperCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) lowerCAmelCase__ : Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , )
678
0
def counting_sort(collection):
    """Stable counting sort; returns a new sorted list of ints (may be negative)."""
    # if the collection is empty, returns empty
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr.  The original dropped
    # this placement into a throwaway local, so the output stayed all zeros.
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string):
    """Sort the characters of ``string`` via counting sort on code points."""
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
323
# CLRS-style capacity matrix used by the demo in __main__ (max flow 0 -> 5 is 23).
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    """Breadth-first search in the residual ``graph``.

    Records the discovered augmenting path in ``parent`` and returns True if
    the sink ``t`` is reachable from the source ``s``.
    """
    # Return True if there is node that has not iterated.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    """Run Edmonds-Karp max-flow and return the saturated edges.

    Returns the (u, v) pairs whose residual capacity dropped to zero while
    their original capacity was positive. Note: mutates ``graph`` in place.
    """
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
678
0
import argparse

from transformers import (
    BigBirdConfig,
    BigBirdForPreTraining,
    BigBirdForQuestionAnswering,
    load_tf_weights_in_big_bird,
)
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    """Convert a TensorFlow BigBird checkpoint to a PyTorch model directory.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint to read weights from.
        big_bird_config_file: JSON config describing the model architecture.
        pytorch_dump_path: output directory for the converted PyTorch model.
        is_trivia_qa: when True, build a model with a TriviaQA (QA) head.

    (The original defined this under a different name than the one the CLI
    entry point called, with duplicate parameter names; both fixed.)
    """
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
33
# NOTE(review): this test module is machine-mangled: statements are fused onto a
# few physical lines, identifiers were rewritten to placeholders, and every
# test method is defined as `__magic_name__`, so later definitions overwrite
# earlier ones at class-creation time — only the last method survives. Kept
# byte-identical below (comments only; some physical-line boundaries fall
# inside triple-quoted runtime strings and therefore take no comment). Restore
# the upstream transformers test file before running.
import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_gpu, require_torch_or_tf, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class _lowerCAmelCase ( unittest.TestCase ): A__ = MODEL_FOR_CAUSAL_LM_MAPPING A__ = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def __magic_name__( self ): lowerCAmelCase__ : Tuple = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' ) # Using `do_sample=False` to force deterministic output lowerCAmelCase__ : Optional[int] = text_generator('''This is a test''' , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ] , ) lowerCAmelCase__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] ) self.assertEqual( __UpperCAmelCase , [ [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ], [ { '''generated_text''': ( '''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy''' ''' oscope. oscope. 
FiliFili@@''' ) } ], ] , ) lowerCAmelCase__ : str = text_generator('''This is a test''' , do_sample=__UpperCAmelCase , num_return_sequences=2 , return_tensors=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ {'''generated_token_ids''': ANY(__UpperCAmelCase )}, {'''generated_token_ids''': ANY(__UpperCAmelCase )}, ] , ) lowerCAmelCase__ : List[Any] = text_generator.model.config.eos_token_id lowerCAmelCase__ : List[Any] = '''<pad>''' lowerCAmelCase__ : List[Any] = text_generator( ['''This is a test''', '''This is a second test'''] , do_sample=__UpperCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__UpperCAmelCase , ) self.assertEqual( __UpperCAmelCase , [ [ {'''generated_token_ids''': ANY(__UpperCAmelCase )}, {'''generated_token_ids''': ANY(__UpperCAmelCase )}, ], [ {'''generated_token_ids''': ANY(__UpperCAmelCase )}, {'''generated_token_ids''': ANY(__UpperCAmelCase )}, ], ] , ) @require_tf def __magic_name__( self ): lowerCAmelCase__ : int = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' ) # Using `do_sample=False` to force deterministic output lowerCAmelCase__ : List[Any] = text_generator('''This is a test''' , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ] , ) lowerCAmelCase__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ], [ { '''generated_text''': ( '''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes''' ''' Cannes 閲閲Cannes Cannes Cannes 攵 please,''' ) } ], ] , ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): 
# (continuation) pipeline fixture construction + small-model behavior checks.
lowerCAmelCase__ : Dict = TextGenerationPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) return text_generator, ["This is a test", "Another test"] def __magic_name__( self ): lowerCAmelCase__ : Any = '''Hello I believe in''' lowerCAmelCase__ : List[Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase__ : Optional[int] = text_generator(__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , ) lowerCAmelCase__ : List[str] = text_generator(__UpperCAmelCase , stop_sequence=''' fe''' ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': '''Hello I believe in fe'''}] ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : str = text_generator.model lowerCAmelCase__ : Optional[int] = text_generator.tokenizer lowerCAmelCase__ : Tuple = text_generator('''This is a test''' ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) lowerCAmelCase__ : Optional[int] = text_generator('''This is a test''' , return_full_text=__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] ) lowerCAmelCase__ : Dict = pipeline(task='''text-generation''' , model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , return_full_text=__UpperCAmelCase ) lowerCAmelCase__ : Dict = text_generator('''This is a test''' ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] ) lowerCAmelCase__ : List[str] = text_generator('''This is a test''' , return_full_text=__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) 
# (continuation) return_full_text / return_tensors argument handling and the
# mutually-exclusive-argument error cases.
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) lowerCAmelCase__ : Optional[int] = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}], [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}], ] , ) if text_generator.tokenizer.pad_token is not None: lowerCAmelCase__ : List[str] = text_generator( ['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}], [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}], ] , ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Any = text_generator('''test''' , return_full_text=__UpperCAmelCase , return_text=__UpperCAmelCase ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Optional[int] = text_generator('''test''' , return_full_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : str = text_generator('''test''' , return_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase ) # Empty prompt is slighly special # it requires BOS token to exist. # Special case for Pegasus which will always append EOS so will # work even without BOS. 
# (continuation) empty-prompt handling and long-generation ("hole") strategies.
if ( text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__ or "Git" in model.__class__.__name__ ): lowerCAmelCase__ : str = text_generator('''''' ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) else: with self.assertRaises((ValueError, AssertionError) ): lowerCAmelCase__ : List[str] = text_generator('''''' ) if text_generator.framework == "tf": # TF generation does not support max_new_tokens, and it's impossible # to control long generation with only max_length without # fancy calculation, dismissing tests for now. return # We don't care about infinite range models. # They already work. # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly. lowerCAmelCase__ : Optional[Any] = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM'''] if ( tokenizer.model_max_length < 1_0000 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS ): # Handling of large generations with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ): text_generator('''This is a test''' * 500 , max_new_tokens=20 ) lowerCAmelCase__ : Optional[Any] = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 ) # Hole strategy cannot work with self.assertRaises(__UpperCAmelCase ): text_generator( '''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , ) @require_torch @require_accelerate @require_torch_gpu def __magic_name__( self ): import torch # Classic `model_kwargs` lowerCAmelCase__ : List[str] = pipeline( model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) lowerCAmelCase__ : Any = pipe('''This is a 
test''' ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.) lowerCAmelCase__ : Dict = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) lowerCAmelCase__ : Union[str, Any] = pipe('''This is a test''' ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602 lowerCAmelCase__ : str = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa ) lowerCAmelCase__ : Any = pipe('''This is a test''' ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) @require_torch @require_torch_gpu def __magic_name__( self ): import torch lowerCAmelCase__ : List[str] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa ) pipe('''This is a test''' ) @require_torch @require_accelerate @require_torch_gpu def __magic_name__( self ): import torch lowerCAmelCase__ : Any = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa ) pipe('''This is a test''' , do_sample=__UpperCAmelCase , top_p=0.5 ) def __magic_name__( self ): 
# (continuation) max_length vs max_new_tokens warning-log checks.
lowerCAmelCase__ : int = '''Hello world''' lowerCAmelCase__ : Union[str, Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' ) if text_generator.model.framework == "tf": lowerCAmelCase__ : List[Any] = logging.get_logger('''transformers.generation.tf_utils''' ) else: lowerCAmelCase__ : Dict = logging.get_logger('''transformers.generation.utils''' ) lowerCAmelCase__ : Optional[Any] = '''Both `max_new_tokens`''' # The beggining of the message to be checked in this test # Both are set by the user -> log warning with CaptureLogger(__UpperCAmelCase ) as cl: lowerCAmelCase__ : List[str] = text_generator(__UpperCAmelCase , max_length=10 , max_new_tokens=1 ) self.assertIn(__UpperCAmelCase , cl.out ) # The user only sets one -> no warning with CaptureLogger(__UpperCAmelCase ) as cl: lowerCAmelCase__ : Any = text_generator(__UpperCAmelCase , max_new_tokens=1 ) self.assertNotIn(__UpperCAmelCase , cl.out ) with CaptureLogger(__UpperCAmelCase ) as cl: lowerCAmelCase__ : Union[str, Any] = text_generator(__UpperCAmelCase , max_length=10 ) self.assertNotIn(__UpperCAmelCase , cl.out )
678
0
import copy
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


# Module logger; the obfuscated source rebound this name to the archive map,
# leaving `logger.info` below undefined.
logger = logging.get_logger(__name__)

# Pretrained config archive map (kept under the original module-level name).
UpperCamelCase__ = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class __SCREAMING_SNAKE_CASE(PretrainedConfig):
    """Configuration class for the Conditional DETR model.

    Stores hyperparameters for the encoder/decoder transformer, the CNN
    backbone, the Hungarian matcher costs and the loss coefficients.
    """

    # These exact attribute names are required by the PretrainedConfig
    # machinery; the obfuscated source collapsed all three to `snake_case`,
    # which broke serialization and attribute mapping.
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                # Re-hydrate a plain-dict backbone config into its config class.
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        # The obfuscated source assigned every value to one throwaway local,
        # so the config instance never stored anything; restore `self.*`.
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher costs
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        """Alias used by common model-config consumers."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """Alias used by common model-config consumers."""
        return self.d_model

    def to_dict(self):
        """Serialize this config (and any nested backbone config) to a dict."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


# NOTE(review): the obfuscated source gave this class the same name as the
# config class above, shadowing it at module level; renamed to keep both
# definitions reachable.
class __SCREAMING_SNAKE_CASE_ONNX(OnnxConfig):
    """ONNX export configuration for Conditional DETR."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes for export: batch/channels/spatial dims for pixels,
        # batch for the mask.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
619
def __lowerCAmelCase ( UpperCamelCase ) -> str: return "".join([hex(UpperCamelCase )[2:].zfill(2 ).upper() for byte in list(UpperCamelCase )] ) def __lowerCAmelCase ( UpperCamelCase ) -> bytes: # Check data validity, following RFC3548 # https://www.ietf.org/rfc/rfc3548.txt if (len(UpperCamelCase ) % 2) != 0: raise ValueError( '''Base16 encoded data is invalid: Data does not have an even number of hex digits.''' ) # Check the character set - the standard base16 alphabet # is uppercase according to RFC3548 section 6 if not set(UpperCamelCase ) <= set('''0123456789ABCDEF''' ): raise ValueError( '''Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters.''' ) # For every two hexadecimal digits (= a byte), turn it into an integer. # Then, string the result together into bytes, and return it. return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(UpperCamelCase ) , 2 ) ) if __name__ == "__main__": import doctest doctest.testmod()
678
0
import argparse import torch from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert from transformers.utils import logging logging.set_verbosity_info() def A__ ( snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] ): # Initialise PyTorch model SCREAMING_SNAKE_CASE__: List[str]= LxmertConfig.from_json_file(snake_case_ ) print(F'Building PyTorch model from configuration: {config}' ) SCREAMING_SNAKE_CASE__: int= LxmertForPreTraining(snake_case_ ) # Load weights from tf checkpoint load_tf_weights_in_lxmert(snake_case_ , snake_case_ , snake_case_ ) # Save pytorch-model print(F'Save PyTorch model to {pytorch_dump_path}' ) torch.save(model.state_dict() , snake_case_ ) if __name__ == "__main__": lowercase_ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) lowercase_ : Optional[Any] = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
64
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class _lowerCAmelCase(SchedulerCommonTest):
    """Tests for DPMSolverSDEScheduler.

    The obfuscated source collapsed every method name to ``__magic_name__``
    and every local/class attribute to one identifier, so no test could run;
    distinct names are restored from the visible call sites.
    """

    # Names required by the SchedulerCommonTest harness.
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Return the default scheduler config, overridable via kwargs."""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        # Per-device golden values (SDE sampling differs slightly by backend).
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        # Timesteps placed directly on the target device.
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        # NOTE(review): the obfuscated flag value was lost; `use_karras_sigmas=True`
        # is assumed from the test name — confirm against upstream.
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
678
0
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _UpperCamelCase ( _lowercase ,_lowercase ,_lowercase ,unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : Tuple =AltDiffusionPipeline __UpperCAmelCase : Tuple =TEXT_TO_IMAGE_PARAMS __UpperCAmelCase : int =TEXT_TO_IMAGE_BATCH_PARAMS __UpperCAmelCase : Union[str, Any] =TEXT_TO_IMAGE_IMAGE_PARAMS __UpperCAmelCase : List[str] =TEXT_TO_IMAGE_IMAGE_PARAMS def snake_case ( self ): torch.manual_seed(0 ) __lowerCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) __lowerCAmelCase = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , ) torch.manual_seed(0 ) __lowerCAmelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) # TODO: address the non-deterministic text encoder (fails for save-load 
tests) # torch.manual_seed(0) # text_encoder_config = RobertaSeriesConfig( # hidden_size=32, # project_dim=32, # intermediate_size=37, # layer_norm_eps=1e-05, # num_attention_heads=4, # num_hidden_layers=5, # vocab_size=5002, # ) # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config) torch.manual_seed(0 ) __lowerCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_02 , ) __lowerCAmelCase = CLIPTextModel(__UpperCAmelCase ) __lowerCAmelCase = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" ) __lowerCAmelCase = 77 __lowerCAmelCase = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def snake_case ( self , __a , __a=0 ): if str(__UpperCAmelCase ).startswith("mps" ): __lowerCAmelCase = torch.manual_seed(__UpperCAmelCase ) else: __lowerCAmelCase = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) __lowerCAmelCase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def snake_case ( self ): super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 ) def snake_case ( self ): super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def snake_case ( self ): __lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase = self.get_dummy_components() torch.manual_seed(0 ) __lowerCAmelCase = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , ) # TODO: remove after 
fixing the non-deterministic text encoder __lowerCAmelCase = RobertaSeriesModelWithTransformation(__UpperCAmelCase ) __lowerCAmelCase = text_encoder __lowerCAmelCase = AltDiffusionPipeline(**__UpperCAmelCase ) __lowerCAmelCase = alt_pipe.to(__UpperCAmelCase ) alt_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) __lowerCAmelCase = self.get_dummy_inputs(__UpperCAmelCase ) __lowerCAmelCase = '''A photo of an astronaut''' __lowerCAmelCase = alt_pipe(**__UpperCAmelCase ) __lowerCAmelCase = output.images __lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __lowerCAmelCase = np.array( [0.5_7_4_8_1_6_2, 0.6_0_4_4_7_1_4_5, 0.4_8_8_2_1_2_1_7, 0.5_0_1_0_0_6_3_6, 0.5_4_3_1_1_8_5, 0.4_5_7_6_3_6_8_3, 0.4_9_6_5_7_6_9_6, 0.4_8_1_3_2_7_3_3, 0.4_7_5_7_3_0_9_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def snake_case ( self ): __lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase = self.get_dummy_components() __lowerCAmelCase = PNDMScheduler(skip_prk_steps=__UpperCAmelCase ) torch.manual_seed(0 ) __lowerCAmelCase = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , ) # TODO: remove after fixing the non-deterministic text encoder __lowerCAmelCase = RobertaSeriesModelWithTransformation(__UpperCAmelCase ) __lowerCAmelCase = text_encoder __lowerCAmelCase = AltDiffusionPipeline(**__UpperCAmelCase ) __lowerCAmelCase = alt_pipe.to(__UpperCAmelCase ) alt_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) __lowerCAmelCase = self.get_dummy_inputs(__UpperCAmelCase ) __lowerCAmelCase = alt_pipe(**__UpperCAmelCase ) __lowerCAmelCase = output.images __lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __lowerCAmelCase = np.array( [0.5_1_6_0_5_0_9_3, 0.5_7_0_7_2_4_1, 0.4_7_3_6_5_5_0_7, 0.5_0_5_7_8_8_8_6, 0.5_6_3_3_8_7_7, 0.4_6_4_2_5_0_3, 
0.5_1_8_2_0_8_1, 0.4_8_7_6_3_4_8_4, 0.4_9_0_8_4_2_3_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def snake_case ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case ( self ): # make sure here that pndm scheduler skips prk __lowerCAmelCase = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , safety_checker=__UpperCAmelCase ) __lowerCAmelCase = alt_pipe.to(__UpperCAmelCase ) alt_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) __lowerCAmelCase = '''A painting of a squirrel eating a burger''' __lowerCAmelCase = torch.manual_seed(0 ) __lowerCAmelCase = alt_pipe([prompt] , generator=__UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=20 , output_type="np" ) __lowerCAmelCase = output.images __lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) __lowerCAmelCase = np.array([0.1_0_1_0, 0.0_8_0_0, 0.0_7_9_4, 0.0_8_8_5, 0.0_8_4_3, 0.0_7_6_2, 0.0_7_6_9, 0.0_7_2_9, 0.0_5_8_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def snake_case ( self ): __lowerCAmelCase = DDIMScheduler.from_pretrained("BAAI/AltDiffusion" , subfolder="scheduler" ) __lowerCAmelCase = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase ) __lowerCAmelCase = alt_pipe.to(__UpperCAmelCase ) alt_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) __lowerCAmelCase = '''A painting of a squirrel eating a burger''' __lowerCAmelCase = torch.manual_seed(0 ) __lowerCAmelCase = alt_pipe([prompt] , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="numpy" ) __lowerCAmelCase = output.images __lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) __lowerCAmelCase = np.array([0.4_0_1_9, 0.4_0_5_2, 0.3_8_1_0, 0.4_1_1_9, 0.3_9_1_6, 0.3_9_8_2, 0.4_6_5_1, 
0.4_1_9_5, 0.5_3_2_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
636
import time
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers.generation import (
        MaxLengthCriteria,
        MaxNewTokensCriteria,
        MaxTimeCriteria,
        StoppingCriteriaList,
        validate_stopping_criteria,
    )


@require_torch
class _lowerCAmelCase(unittest.TestCase):
    """Tests for the generation stopping criteria.

    The obfuscated source collapsed all locals (criteria/input_ids/scores) and
    method names, leaving undefined names; distinct names are restored from
    the visible call sites.
    """

    def _get_tensors(self, length):
        """Return (input_ids, scores) batches of the given sequence length."""
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        # Below the max length: not stopped.
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        # At the max length: stopped.
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
        # The wrapping list exposes the effective max_length (start + new).
        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))
        # Backdate the start timestamp so the time budget is already exceeded.
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)
        # NOTE(review): the warning class was lost in obfuscation; UserWarning
        # is assumed from upstream — confirm.
        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
678
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase : Any = { 'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'], 'processing_git': ['GitProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Optional[int] = [ 'GIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'GitForCausalLM', 'GitModel', 'GitPreTrainedModel', 'GitVisionModel', ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys _lowercase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
49
from functools import reduce lowerCAmelCase_ = ( """73167176531330624919225119674426574742355349194934""" """96983520312774506326239578318016984801869478851843""" """85861560789112949495459501737958331952853208805511""" """12540698747158523863050715693290963295227443043557""" """66896648950445244523161731856403098711121722383113""" """62229893423380308135336276614282806444486645238749""" """30358907296290491560440772390713810515859307960866""" """70172427121883998797908792274921901699720888093776""" """65727333001053367881220235421809751254540594752243""" """52584907711670556013604839586446706324415722155397""" """53697817977846174064955149290862569321978468622482""" """83972241375657056057490261407972968652414535100474""" """82166370484403199890008895243450658541227588666881""" """16427171479924442928230863465674813919123162824586""" """17866458359124566529476545682848912883142607690042""" """24219022671055626321111109370544217506941658960408""" """07198403850962455444362981230987879927244284909188""" """84580156166097919133875499200524063689912560717606""" """05886116467109405077541002256983155200055935729725""" """71636269561882670428252483600823257530420752963450""" ) def __lowerCAmelCase ( UpperCamelCase = N ) -> int: return max( # mypy cannot properly interpret reduce int(reduce(lambda UpperCamelCase , UpperCamelCase : str(int(UpperCamelCase ) * int(UpperCamelCase ) ) , n[i : i + 13] ) ) for i in range(len(UpperCamelCase ) - 12 ) ) if __name__ == "__main__": print(F"""{solution() = }""")
678
0
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

# The obfuscated source bound every one of these constants to `_a` while the
# class body referenced the real names below, raising NameError; restored.
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
        'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
        'moussaKam/barthez-orangesum-title': (
            'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
        ),
    },
    'tokenizer_file': {
        'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json',
        'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json',
        'moussaKam/barthez-orangesum-title': (
            'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'moussaKam/mbarthez': 1_024,
    'moussaKam/barthez': 1_024,
    'moussaKam/barthez-orangesum-title': 1_024,
}

SPIECE_UNDERLINE = '▁'


class lowercase_(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) BARThez tokenizer.

    The obfuscated source gave every class attribute one name, every parameter
    the duplicate name ``a_`` (a SyntaxError), and dropped all ``self.``
    assignments; distinct names are restored from the visible usages.
    """

    # Attribute names required by PreTrainedTokenizerFast machinery.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # Only possible when a slow (sentencepiece) vocab file was provided.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add <s> ... </s> (and pair separators) around the token id lists."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """BARThez uses a single (all-zero) token type id sequence."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the sentencepiece model into *save_directory*."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.'
            )

        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
447
"""PyTorch → Flax state-dict conversion helpers.

Fixes in this revision: all three functions were named identically
(`__lowerCAmelCase`) so later definitions shadowed earlier ones, while the
bodies called `rename_key` / `rename_key_and_reshape_tensor` — names that were
never defined. The real names are restored and the converted tensors are now
actually stored into the result dict.
"""

import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging

logger = logging.get_logger(__name__)


def rename_key(key):
    """Rewrite `module.N` segments as `module_N` (Flax naming convention)."""
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename a PyTorch parameter key tuple to its Flax equivalent and
    reshape/transpose the tensor to match Flax layout.

    Returns:
        (flax_key_tuple, tensor) — tensor transposed for conv/linear kernels.
    """
    # layer norm: PyTorch "bias" can map to Flax "scale" when no bias exists.
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: NCHW -> HWIO
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: transpose weight matrix
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    """Convert a PyTorch state dict to a nested Flax parameter dict.

    Raises:
        ValueError: when a converted tensor's shape disagrees with the
        randomly-initialized Flax parameter of the same name.
    """
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                    F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."""
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
678
0
'''simple docstring''' import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class __A ( _lowercase ): def _lowercase (self : Tuple ): UpperCAmelCase_ = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__UpperCAmelCase , "tf_padding" ) ) self.parent.assertTrue(hasattr(__UpperCAmelCase , "depth_multiplier" ) ) class __A : def __init__(self : Any , __a : Dict , __a : Optional[Any]=13 , __a : Optional[Any]=3 , __a : Any=32 , __a : Union[str, Any]=0.25 , __a : List[str]=8 , __a : Optional[Any]=8 , __a : List[str]=6 , __a : Optional[Any]=32 , __a : int=True , __a : Tuple=True , __a : List[str]=True , __a : Dict="relu6" , __a : Tuple=1280 , __a : List[str]=0.1 , __a : List[str]=0.02 , __a : Union[str, Any]=True , __a : Tuple=True , __a : Any=10 , __a : Optional[int]=None , ): UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = num_channels UpperCAmelCase_ = image_size UpperCAmelCase_ = depth_multiplier UpperCAmelCase_ = depth_divisible_by UpperCAmelCase_ = min_depth UpperCAmelCase_ = expand_ratio UpperCAmelCase_ = tf_padding UpperCAmelCase_ = output_stride UpperCAmelCase_ = first_layer_is_expansion UpperCAmelCase_ = finegrained_output UpperCAmelCase_ = hidden_act UpperCAmelCase_ = last_hidden_size 
if finegrained_output else int(last_hidden_size * depth_multiplier ) UpperCAmelCase_ = classifier_dropout_prob UpperCAmelCase_ = use_labels UpperCAmelCase_ = is_training UpperCAmelCase_ = num_labels UpperCAmelCase_ = initializer_range UpperCAmelCase_ = scope def _lowercase (self : Any ): UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ = None UpperCAmelCase_ = None if self.use_labels: UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_labels ) UpperCAmelCase_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) UpperCAmelCase_ = self.get_config() return config, pixel_values, labels, pixel_labels def _lowercase (self : int ): return MobileNetVaConfig( num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def _lowercase (self : Dict , __a : Dict , __a : str , __a : Any , __a : Tuple ): UpperCAmelCase_ = MobileNetVaModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() UpperCAmelCase_ = model(__UpperCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) self.parent.assertEqual( result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , ) def _lowercase (self : List[str] , __a : Dict , __a : Optional[Any] , __a : Optional[Any] , __a : Tuple ): UpperCAmelCase_ = self.num_labels UpperCAmelCase_ = MobileNetVaForImageClassification(__UpperCAmelCase ) 
model.to(__UpperCAmelCase ) model.eval() UpperCAmelCase_ = model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase (self : int , __a : int , __a : Optional[Any] , __a : Tuple , __a : Optional[Any] ): UpperCAmelCase_ = self.num_labels UpperCAmelCase_ = MobileNetVaForSemanticSegmentation(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() UpperCAmelCase_ = model(__UpperCAmelCase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) UpperCAmelCase_ = model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def _lowercase (self : str ): UpperCAmelCase_ = self.prepare_config_and_inputs() UpperCAmelCase_ = config_and_inputs UpperCAmelCase_ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __A ( _lowercase , _lowercase , unittest.TestCase ): a__ : Optional[Any] = ( (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation) if is_torch_available() else () ) a__ : Any = ( { """feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification, """image-segmentation""": MobileNetVaForSemanticSegmentation, } if is_torch_available() else {} ) a__ : Tuple = False a__ : Optional[int] = False a__ : List[Any] = False a__ : Union[str, Any] = False def _lowercase (self : Optional[int] ): UpperCAmelCase_ = MobileNetVaModelTester(self ) UpperCAmelCase_ = MobileNetVaConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase ) def _lowercase (self : int ): self.config_tester.run_common_tests() @unittest.skip(reason="MobileNetV2 does not use inputs_embeds" ) def _lowercase (self : Any ): 
pass @unittest.skip(reason="MobileNetV2 does not support input and output embeddings" ) def _lowercase (self : int ): pass @unittest.skip(reason="MobileNetV2 does not output attentions" ) def _lowercase (self : List[Any] ): pass def _lowercase (self : Tuple ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ = model_class(__UpperCAmelCase ) UpperCAmelCase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ = [*signature.parameters.keys()] UpperCAmelCase_ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def _lowercase (self : Union[str, Any] ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def _lowercase (self : str ): def check_hidden_states_output(__a : List[str] , __a : Optional[int] , __a : Optional[int] ): UpperCAmelCase_ = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): UpperCAmelCase_ = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) UpperCAmelCase_ = outputs.hidden_states UpperCAmelCase_ = 16 self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def _lowercase (self : Any ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase ) def _lowercase (self : int ): UpperCAmelCase_ = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase ) @slow def _lowercase (self : Optional[Any] ): for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ = MobileNetVaModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def lowerCAmelCase_ ( ) -> Dict: '''simple docstring''' UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class __A ( unittest.TestCase ): @cached_property def _lowercase (self : Union[str, Any] ): return ( MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224" ) if is_vision_available() else None ) @slow def _lowercase (self : Tuple ): UpperCAmelCase_ = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224" ).to(__UpperCAmelCase ) UpperCAmelCase_ = self.default_image_processor UpperCAmelCase_ = prepare_img() UpperCAmelCase_ = image_processor(images=__UpperCAmelCase , return_tensors="pt" ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): UpperCAmelCase_ = model(**__UpperCAmelCase ) # verify the logits UpperCAmelCase_ = torch.Size((1, 1001) ) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) UpperCAmelCase_ = torch.tensor([0.24_45, -1.19_93, 0.19_05] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def _lowercase (self : Optional[int] ): UpperCAmelCase_ = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513" ) UpperCAmelCase_ = model.to(__UpperCAmelCase ) UpperCAmelCase_ = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513" ) UpperCAmelCase_ = prepare_img() UpperCAmelCase_ = image_processor(images=__UpperCAmelCase , return_tensors="pt" ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): UpperCAmelCase_ = model(**__UpperCAmelCase ) 
UpperCAmelCase_ = outputs.logits # verify the logits UpperCAmelCase_ = torch.Size((1, 21, 65, 65) ) self.assertEqual(logits.shape , __UpperCAmelCase ) UpperCAmelCase_ = torch.tensor( [ [[17.57_90, 17.75_81, 18.33_55], [18.32_57, 18.42_30, 18.89_73], [18.61_69, 18.86_50, 19.21_87]], [[-2.15_95, -2.09_77, -2.37_41], [-2.42_26, -2.30_28, -2.68_35], [-2.78_19, -2.59_91, -2.77_06]], [[4.20_58, 4.83_17, 4.76_38], [4.41_36, 5.03_61, 4.93_83], [4.50_28, 4.96_44, 4.87_34]], ] , device=__UpperCAmelCase , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
78
"""Lazy import structure for the Nezha model package.

Fixes in this revision: the modeling-import list previously *overwrote* the
import-structure dict (both were assigned to the same throwaway name), the
final `_LazyModule` referenced an undefined `_import_structure`, and the lazy
module was never installed into `sys.modules`.
"""

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Mapping of submodule name -> public names, consumed by _LazyModule below.
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch-backed classes are simply omitted when torch is absent.
    pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
678
0
# Quine: a program whose output is exactly its own source code.
# The lambda substitutes the template string into itself: %r re-inserts the
# string with its quotes, and %% in the template escapes the literal % so the
# printed copy contains the same format specifier. Do not "restyle" this line —
# any cosmetic change breaks the self-reproducing property.
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
283
import tempfile import unittest import numpy as np from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import BertConfig, is_flax_available from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax if is_flax_available(): import os from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FlaxBertModel lowerCAmelCase_ = """0.12""" # assumed parallelism: 8 @require_flax @is_staging_test class _lowerCAmelCase ( unittest.TestCase ): @classmethod def __magic_name__( cls ): lowerCAmelCase__ : Dict = TOKEN HfFolder.save_token(__UpperCAmelCase ) @classmethod def __magic_name__( cls ): try: delete_repo(token=cls._token , repo_id='''test-model-flax''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' ) except HTTPError: pass def __magic_name__( self ): lowerCAmelCase__ : List[Any] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) lowerCAmelCase__ : List[Any] = FlaxBertModel(__UpperCAmelCase ) model.push_to_hub('''test-model-flax''' , use_auth_token=self._token ) lowerCAmelCase__ : List[Any] = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" ) lowerCAmelCase__ : Union[str, Any] = flatten_dict(unfreeze(model.params ) ) lowerCAmelCase__ : List[Any] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowerCAmelCase__ : str = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" ) # Reset repo delete_repo(token=self._token , repo_id='''test-model-flax''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(__UpperCAmelCase , repo_id='''test-model-flax''' , push_to_hub=__UpperCAmelCase , use_auth_token=self._token ) lowerCAmelCase__ : Tuple = 
FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" ) lowerCAmelCase__ : List[str] = flatten_dict(unfreeze(model.params ) ) lowerCAmelCase__ : Optional[int] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowerCAmelCase__ : Union[str, Any] = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" ) def __magic_name__( self ): lowerCAmelCase__ : List[str] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) lowerCAmelCase__ : Optional[Any] = FlaxBertModel(__UpperCAmelCase ) model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token ) lowerCAmelCase__ : str = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' ) lowerCAmelCase__ : List[str] = flatten_dict(unfreeze(model.params ) ) lowerCAmelCase__ : Dict = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowerCAmelCase__ : Tuple = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( __UpperCAmelCase , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=__UpperCAmelCase , use_auth_token=self._token ) lowerCAmelCase__ : Union[str, Any] = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' ) lowerCAmelCase__ : Optional[int] = flatten_dict(unfreeze(model.params ) ) lowerCAmelCase__ : List[str] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowerCAmelCase__ : Tuple = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Tuple: lowerCAmelCase__ 
: Any = True lowerCAmelCase__ : Any = flatten_dict(modela.params ) lowerCAmelCase__ : List[str] = flatten_dict(modela.params ) for key in flat_params_a.keys(): if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4: lowerCAmelCase__ : Optional[Any] = False return models_are_equal @require_flax class _lowerCAmelCase ( unittest.TestCase ): def __magic_name__( self ): lowerCAmelCase__ : List[str] = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) lowerCAmelCase__ : List[str] = FlaxBertModel(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Optional[int] = FlaxBertModel.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : List[str] = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase ) self.assertTrue(check_models_equal(__UpperCAmelCase , __UpperCAmelCase ) ) def __magic_name__( self ): lowerCAmelCase__ : Tuple = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) lowerCAmelCase__ : Union[str, Any] = FlaxBertModel(__UpperCAmelCase ) lowerCAmelCase__ : Dict = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , max_shard_size='''10KB''' ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Tuple = FlaxBertModel.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : Any = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase ) self.assertTrue(check_models_equal(__UpperCAmelCase , __UpperCAmelCase ) ) def __magic_name__( self ): lowerCAmelCase__ : List[str] = '''bert''' lowerCAmelCase__ : int = '''hf-internal-testing/tiny-random-bert-subfolder''' with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Dict = FlaxBertModel.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : 
Optional[int] = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : List[Any] = '''bert''' lowerCAmelCase__ : Tuple = '''hf-internal-testing/tiny-random-bert-sharded-subfolder''' with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Union[str, Any] = FlaxBertModel.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase )
678
0
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging lowerCAmelCase: str =logging.get_logger(__name__) if is_vision_available(): import PIL class lowerCamelCase__ ( _lowercase ): __UpperCAmelCase = ["""pixel_values"""] def __init__( self , snake_case = True , snake_case = None , snake_case = PILImageResampling.BICUBIC , snake_case = True , snake_case = None , snake_case = True , snake_case = 1 / 2_5_5 , snake_case = True , snake_case = None , snake_case = None , snake_case = True , **snake_case , ) -> str: """simple docstring""" super().__init__(**__UpperCAmelCase ) lowercase : Optional[int] = size if size is not None else {'''shortest_edge''': 2_2_4} lowercase : str = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) lowercase : Optional[Any] = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4} lowercase : Optional[int] = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase , param_name="""crop_size""" ) lowercase : Optional[Any] = do_resize lowercase : Tuple = size lowercase : Optional[Any] = resample lowercase : Dict = do_center_crop lowercase : Optional[Any] = crop_size lowercase : Union[str, Any] = do_rescale lowercase : Optional[Any] = rescale_factor lowercase : Union[str, Any] = do_normalize lowercase : Tuple = image_mean if image_mean is not None else OPENAI_CLIP_MEAN lowercase : Optional[int] = image_std if image_std is not None else OPENAI_CLIP_STD lowercase : Dict = do_convert_rgb def 
_UpperCAmelCase ( self , snake_case , snake_case , snake_case = PILImageResampling.BICUBIC , snake_case = None , **snake_case , ) -> Dict: """simple docstring""" lowercase : int = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) if "shortest_edge" not in size: raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) lowercase : Union[str, Any] = get_resize_output_image_size(__UpperCAmelCase , size=size["""shortest_edge"""] , default_to_square=__UpperCAmelCase ) return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def _UpperCAmelCase ( self , snake_case , snake_case , snake_case = None , **snake_case , ) -> List[Any]: """simple docstring""" lowercase : Optional[int] = get_size_dict(__UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' ) return center_crop(__UpperCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def _UpperCAmelCase ( self , snake_case , snake_case , snake_case = None , **snake_case , ) -> List[Any]: """simple docstring""" return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def _UpperCAmelCase ( self , snake_case , snake_case , snake_case , snake_case = None , **snake_case , ) -> int: """simple docstring""" return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def _UpperCAmelCase ( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = ChannelDimension.FIRST , **snake_case , ) -> 
Union[str, Any]: """simple docstring""" lowercase : str = do_resize if do_resize is not None else self.do_resize lowercase : str = size if size is not None else self.size lowercase : str = get_size_dict(__UpperCAmelCase , param_name="""size""" , default_to_square=__UpperCAmelCase ) lowercase : Dict = resample if resample is not None else self.resample lowercase : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop lowercase : Any = crop_size if crop_size is not None else self.crop_size lowercase : Optional[Any] = get_size_dict(__UpperCAmelCase , param_name="""crop_size""" , default_to_square=__UpperCAmelCase ) lowercase : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale lowercase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase : Tuple = do_normalize if do_normalize is not None else self.do_normalize lowercase : Tuple = image_mean if image_mean is not None else self.image_mean lowercase : Optional[Any] = image_std if image_std is not None else self.image_std lowercase : Optional[int] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb lowercase : Union[str, Any] = make_list_of_images(__UpperCAmelCase ) if not valid_images(__UpperCAmelCase ): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: lowercase : Dict = [convert_to_rgb(__UpperCAmelCase ) for image in images] # All transformations expect numpy arrays. lowercase : Any = [to_numpy_array(__UpperCAmelCase ) for image in images] if do_resize: lowercase : Optional[int] = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images] if do_center_crop: lowercase : Optional[Any] = [self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase ) for image in images] if do_rescale: lowercase : Union[str, Any] = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images] if do_normalize: lowercase : Tuple = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images] lowercase : Dict = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images] lowercase : Union[str, Any] = {'''pixel_values''': images} return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
607
from random import randint from tempfile import TemporaryFile import numpy as np def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Any: lowerCAmelCase__ : Optional[Any] = 0 if start < end: lowerCAmelCase__ : Union[str, Any] = randint(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : List[str] = a[end] lowerCAmelCase__ : List[Any] = a[pivot] lowerCAmelCase__ : str = temp lowerCAmelCase__ , lowerCAmelCase__ : Tuple = _in_place_partition(UpperCamelCase , UpperCamelCase , UpperCamelCase ) count += _in_place_quick_sort(UpperCamelCase , UpperCamelCase , p - 1 ) count += _in_place_quick_sort(UpperCamelCase , p + 1 , UpperCamelCase ) return count def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[Any]: lowerCAmelCase__ : List[str] = 0 lowerCAmelCase__ : List[Any] = randint(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : str = a[end] lowerCAmelCase__ : Optional[int] = a[pivot] lowerCAmelCase__ : List[str] = temp lowerCAmelCase__ : str = start - 1 for index in range(UpperCamelCase , UpperCamelCase ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value lowerCAmelCase__ : List[str] = new_pivot_index + 1 lowerCAmelCase__ : int = a[new_pivot_index] lowerCAmelCase__ : int = a[index] lowerCAmelCase__ : Tuple = temp lowerCAmelCase__ : Optional[Any] = a[new_pivot_index + 1] lowerCAmelCase__ : List[str] = a[end] lowerCAmelCase__ : Union[str, Any] = temp return new_pivot_index + 1, count lowerCAmelCase_ = TemporaryFile() lowerCAmelCase_ = 1_00 # 1000 elements are to be sorted lowerCAmelCase_ , lowerCAmelCase_ = 0, 1 # mean and standard deviation lowerCAmelCase_ = np.random.normal(mu, sigma, p) np.save(outfile, X) print("""The array is""") print(X) outfile.seek(0) # using the same array lowerCAmelCase_ = np.load(outfile) lowerCAmelCase_ = len(M) - 1 lowerCAmelCase_ = _in_place_quick_sort(M, 0, r) print( """No of Comparisons for 100 elements selected from a standard normal distribution""" 
"""is :""" ) print(z)
678
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __snake_case = { """configuration_xlm_roberta""": [ """XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMRobertaConfig""", """XLMRobertaOnnxConfig""", ], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ["""XLMRobertaTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ["""XLMRobertaTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ """XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """XLMRobertaForCausalLM""", """XLMRobertaForMaskedLM""", """XLMRobertaForMultipleChoice""", """XLMRobertaForQuestionAnswering""", """XLMRobertaForSequenceClassification""", """XLMRobertaForTokenClassification""", """XLMRobertaModel""", """XLMRobertaPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ """TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFXLMRobertaForCausalLM""", """TFXLMRobertaForMaskedLM""", """TFXLMRobertaForMultipleChoice""", """TFXLMRobertaForQuestionAnswering""", """TFXLMRobertaForSequenceClassification""", """TFXLMRobertaForTokenClassification""", """TFXLMRobertaModel""", """TFXLMRobertaPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ """FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """FlaxXLMRobertaForMaskedLM""", """FlaxXLMRobertaForCausalLM""", """FlaxXLMRobertaForMultipleChoice""", 
"""FlaxXLMRobertaForQuestionAnswering""", """FlaxXLMRobertaForSequenceClassification""", """FlaxXLMRobertaForTokenClassification""", """FlaxXLMRobertaModel""", """FlaxXLMRobertaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig, XLMRobertaOnnxConfig, ) try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta import XLMRobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, XLMRobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm_roberta import ( TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMRobertaForCausalLM, TFXLMRobertaForMaskedLM, TFXLMRobertaForMultipleChoice, TFXLMRobertaForQuestionAnswering, TFXLMRobertaForSequenceClassification, TFXLMRobertaForTokenClassification, TFXLMRobertaModel, TFXLMRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xlm_roberta import ( FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxXLMRobertaForCausalLM, FlaxXLMRobertaForMaskedLM, FlaxXLMRobertaForMultipleChoice, FlaxXLMRobertaForQuestionAnswering, FlaxXLMRobertaForSequenceClassification, 
FlaxXLMRobertaForTokenClassification, FlaxXLMRobertaModel, FlaxXLMRobertaPreTrainedModel, ) else: import sys __snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
178
import pyarrow.parquet as pq
import pytest

from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_parquet_dataset(dataset, expected_features):
    """Shared assertions: the parquet fixture yields a 4-row / 3-column Dataset with the expected dtypes."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    """keep_in_memory=True must not grow the on-disk Arrow cache; False must."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    """An explicit `features` mapping overrides the dtypes inferred from the parquet file."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    # Build a real Features object from the plain dtype mapping (None means "infer").
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    """The reader honors the requested split and defaults to "train" when none is given."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    # Parenthesized to compare against "train" when no split was requested
    # (the unparenthesized form always passed because "train" is truthy).
    assert dataset.split == (split if split else "train")


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    """A single path and a list of paths are both accepted."""
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for DatasetDict results: each requested split has the fixture's shape and dtypes."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        # No explicit split: read the same file as both "train" and "test".
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    """Round-trip: the table written by ParquetDatasetWriter equals the dataset's Arrow table."""
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    """Image features survive a write + reload, both eagerly and in streaming mode."""
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    """Binary media features get a smaller parquet row-group size; plain features get the default (None)."""
    assert get_writer_batch_size(feature) == expected
678
0
import fire from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoTokenizer from utils import SeqaSeqDataset, pickle_save def lowerCamelCase_(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=1_024 , lowerCamelCase_=1_024 , lowerCamelCase_=False , **lowerCamelCase_ ) -> Any: UpperCAmelCase = AutoTokenizer.from_pretrained(lowerCamelCase_ ) UpperCAmelCase = SeqaSeqDataset(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , type_path="train" , **lowerCamelCase_ ) UpperCAmelCase = tok.pad_token_id def get_lens(lowerCamelCase_ ): UpperCAmelCase = tqdm( DataLoader(lowerCamelCase_ , batch_size=512 , num_workers=8 , shuffle=lowerCamelCase_ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , ) UpperCAmelCase = [] for batch in dl: UpperCAmelCase = batch['''input_ids'''].ne(lowerCamelCase_ ).sum(1 ).tolist() UpperCAmelCase = batch['''labels'''].ne(lowerCamelCase_ ).sum(1 ).tolist() if consider_target: for src, tgt in zip(lowerCamelCase_ , lowerCamelCase_ ): max_lens.append(max(lowerCamelCase_ , lowerCamelCase_ ) ) else: max_lens.extend(lowerCamelCase_ ) return max_lens UpperCAmelCase = get_lens(lowerCamelCase_ ) UpperCAmelCase = SeqaSeqDataset(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , type_path="val" , **lowerCamelCase_ ) UpperCAmelCase = get_lens(lowerCamelCase_ ) pickle_save(lowerCamelCase_ , train_ds.len_file ) pickle_save(lowerCamelCase_ , val_ds.len_file ) if __name__ == "__main__": fire.Fire(save_len_file)
323
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

# NOTE: the original mangled file assigned both the logger and this map to the
# same name, so the map silently clobbered the logger.
FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for FocalNet models.

    Restored from an obfuscated block whose ``__init__`` used the same name for
    every parameter (a SyntaxError) and listed the same base class twice (a
    TypeError); parameter names are recovered from the right-hand sides of the
    body's assignments, and the bases from this module's imports.
    """

    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        # One named stage per depth entry, preceded by the patch-embedding "stem".
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


# Backwards-compatible alias for the obfuscated class name this module previously exported.
_lowerCAmelCase = FocalNetConfig
678
0
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__) lowerCamelCase__ : Optional[Any] = { """google/vit-base-patch16-224""": """https://huggingface.co/vit-base-patch16-224/resolve/main/config.json""", # See all ViT models at https://huggingface.co/models?filter=vit } class __magic_name__ (_lowercase ): '''simple docstring''' __lowercase : Any = 'vit' def __init__( self:List[Any] , _a:Union[str, Any]=7_68 , _a:Dict=12 , _a:Optional[int]=12 , _a:Dict=30_72 , _a:List[Any]="gelu" , _a:Optional[Any]=0.0 , _a:Any=0.0 , _a:int=0.02 , _a:Any=1e-12 , _a:List[Any]=2_24 , _a:Optional[int]=16 , _a:str=3 , _a:Optional[int]=True , _a:int=16 , **_a:Tuple , ): super().__init__(**__UpperCAmelCase ) snake_case__ = hidden_size snake_case__ = num_hidden_layers snake_case__ = num_attention_heads snake_case__ = intermediate_size snake_case__ = hidden_act snake_case__ = hidden_dropout_prob snake_case__ = attention_probs_dropout_prob snake_case__ = initializer_range snake_case__ = layer_norm_eps snake_case__ = image_size snake_case__ = patch_size snake_case__ = num_channels snake_case__ = qkv_bias snake_case__ = encoder_stride class __magic_name__ (_lowercase ): '''simple docstring''' __lowercase : Union[str, Any] = version.parse('1.11' ) @property def SCREAMING_SNAKE_CASE__ ( self:int ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def SCREAMING_SNAKE_CASE__ ( self:Tuple ): return 1e-4
33
from scipy.stats import pearsonr

import datasets


# NOTE: the original mangled block assigned all three constants below to the
# same name while the decorator referenced _DESCRIPTION / _KWARGS_DESCRIPTION,
# which were therefore undefined (NameError at import time).
_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""


_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted class labels, as returned by a model.
    references (`list` of `int`): Ground truth labels.
    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.

Returns:
    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.

Examples:

    Example 1-A simple example using only predictions and references.
        >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
        >>> print(round(results['pearsonr'], 2))
        -0.74

    Example 2-The same as Example 1, but that also returns the `p-value`.
        >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
        >>> print(sorted(list(results.keys())))
        ['p-value', 'pearsonr']
        >>> print(round(results['pearsonr'], 2))
        -0.74
        >>> print(round(results['p-value'], 2))
        0.15
"""


_CITATION = """
@article{2020SciPy-NMeth,
author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
          Haberland, Matt and Reddy, Tyler and Cournapeau, David and
          Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
          Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
          Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
          Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
          Kern, Robert and Larson, Eric and Carey, C J and
          Polat, Ilhan and Feng, Yu and Moore, Eric W. and
          {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
          Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
          Harris, Charles R. and Archibald, Anne M. and
          Ribeiro, Antonio H. and Pedregosa, Fabian and
          {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
          Computing in Python}},
journal = {Nature Methods},
year    = {2020},
volume  = {17},
pages   = {261--272},
adsurl  = {https://rdcu.be/b08Wh},
doi     = {10.1038/s41592-019-0686-2},
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    """Pearson correlation metric backed by scipy.stats.pearsonr."""

    def _info(self):
        # Method names restored to the datasets.Metric API (_info / _compute);
        # the mangled block gave both methods the same name, shadowing this one.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        # pearsonr is symmetric in its two arguments, so argument order does not
        # affect the returned statistic or p-value.
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}


# Backwards-compatible alias for the obfuscated class name previously exported.
_lowerCAmelCase = Pearsonr
678
0
# Lazy-import shim for the Whisper model family: heavy submodules are only
# imported on first attribute access (or eagerly under TYPE_CHECKING for IDEs
# and type checkers).
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# The mangled block reassigned one name for every structure update, clobbering
# the dict, and the final _LazyModule call referenced an undefined
# `_import_structure`; restored to the standard incremental-update pattern.
_import_structure = {
    "configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
    "feature_extraction_whisper": ["WhisperFeatureExtractor"],
    "processing_whisper": ["WhisperProcessor"],
    "tokenization_whisper": ["WhisperTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_whisper"] = [
        "WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WhisperForConditionalGeneration",
        "WhisperModel",
        "WhisperPreTrainedModel",
        "WhisperForAudioClassification",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_whisper"] = [
        "TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWhisperForConditionalGeneration",
        "TFWhisperModel",
        "TFWhisperPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_whisper"] = [
        "FlaxWhisperForConditionalGeneration",
        "FlaxWhisperModel",
        "FlaxWhisperPreTrainedModel",
        "FlaxWhisperForAudioClassification",
    ]

if TYPE_CHECKING:
    from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
    from .feature_extraction_whisper import WhisperFeatureExtractor
    from .processing_whisper import WhisperProcessor
    from .tokenization_whisper import WhisperTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_whisper_fast import WhisperTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_whisper import (
            WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            WhisperForAudioClassification,
            WhisperForConditionalGeneration,
            WhisperModel,
            WhisperPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_whisper import (
            TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWhisperForConditionalGeneration,
            TFWhisperModel,
            TFWhisperPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_whisper import (
            FlaxWhisperForAudioClassification,
            FlaxWhisperForConditionalGeneration,
            FlaxWhisperModel,
            FlaxWhisperPreTrainedModel,
        )

else:
    import sys

    # Replace this module with the lazy proxy; the mangled block bound the
    # proxy to a throwaway name instead of installing it in sys.modules.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
619
from manim import *


class _lowerCAmelCase ( _lowercase ):
    # NOTE(review): judging by the on-screen MarkupText ("Loaded Checkpoint",
    # "np.memmaps on disk or to a particular device", "garbage collection"),
    # this scene appears to animate checkpoint weights being staged from a
    # loaded checkpoint onto disk/device — TODO confirm against the original.
    #
    # WARNING: the code below is mangled. Every assignment target was renamed
    # to `lowerCAmelCase__`, so later reads of `mem`, `cpu`, `gpu`, `model`,
    # `fill`, `target`, `key`, `disk`, `cpu_target`, the `*_arr` /
    # `*_col_base` lists, `animations`, `step_a`, and every `__UpperCAmelCase`
    # (presumably manim direction constants such as RIGHT/DOWN and mobject
    # references) are undefined names. The method raises NameError at runtime;
    # the original variable names are not recoverable from this view, so the
    # code is preserved byte-for-byte and only annotated.
    def __magic_name__( self ):
        # Building blocks: a memory cell, a smaller "meta" cell, and a fill
        # rectangle (presumably `mem`, `meta_mem`, `fill` originally).
        lowerCAmelCase__ : Tuple = Rectangle(height=0.5 , width=0.5 )
        lowerCAmelCase__ : Dict = Rectangle(height=0.25 , width=0.25 )
        lowerCAmelCase__ : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # CPU: two columns of 6 cells plus a label, placed at the left.
        lowerCAmelCase__ : Optional[Any] = [mem.copy() for i in range(6 )]
        lowerCAmelCase__ : int = [mem.copy() for i in range(6 )]
        lowerCAmelCase__ : Optional[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : str = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : List[str] = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : int = Text('''CPU''' , font_size=24 )
        lowerCAmelCase__ : int = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__UpperCAmelCase )
        # GPU: a row of 4 cells plus a label.
        lowerCAmelCase__ : Optional[int] = [mem.copy() for i in range(4 )]
        lowerCAmelCase__ : Tuple = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : Tuple = Text('''GPU''' , font_size=24 )
        lowerCAmelCase__ : int = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        gpu.move_to([-1, -1, 0] )
        self.add(__UpperCAmelCase )
        # Model: a row of 6 cells plus a label, placed at the right.
        lowerCAmelCase__ : int = [mem.copy() for i in range(6 )]
        lowerCAmelCase__ : List[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : Tuple = Text('''Model''' , font_size=24 )
        lowerCAmelCase__ : List[Any] = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        model.move_to([3, -1.0, 0] )
        self.add(__UpperCAmelCase )
        # Place a small fill target next to the CPU columns for each model cell.
        lowerCAmelCase__ : Dict = []
        lowerCAmelCase__ : List[str] = []
        lowerCAmelCase__ : Optional[Any] = []
        for i, rect in enumerate(__UpperCAmelCase ):
            rect.set_stroke(__UpperCAmelCase )
            lowerCAmelCase__ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCAmelCase )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0] , direction=__UpperCAmelCase , buff=0.0 )
            else:
                cpu_target.next_to(model_cpu_arr[i - 1] , direction=__UpperCAmelCase , buff=0.0 )
            self.add(__UpperCAmelCase )
            model_cpu_arr.append(__UpperCAmelCase )
        self.add(*__UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase )
        # "Loaded Checkpoint" group: another row of 6 cells with a label.
        lowerCAmelCase__ : Any = [mem.copy() for i in range(6 )]
        lowerCAmelCase__ : Optional[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : Any = Text('''Loaded Checkpoint''' , font_size=24 )
        lowerCAmelCase__ : Optional[Any] = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        checkpoint.move_to([3, 0.5, 0] )
        self.add(__UpperCAmelCase )
        # Mirror each checkpoint cell into the CPU columns (first 5 left, rest right).
        lowerCAmelCase__ : Dict = []
        lowerCAmelCase__ : str = []
        for i, rect in enumerate(__UpperCAmelCase ):
            lowerCAmelCase__ : Union[str, Any] = fill.copy().set_fill(__UpperCAmelCase , opacity=0.7 )
            target.move_to(__UpperCAmelCase )
            ckpt_arr.append(__UpperCAmelCase )
            lowerCAmelCase__ : List[str] = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5] )
            ckpt_cpu_arr.append(__UpperCAmelCase )
        self.add(*__UpperCAmelCase , *__UpperCAmelCase )
        # Legend: a key square with colored bullet captions.
        lowerCAmelCase__ : Optional[Any] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        lowerCAmelCase__ : List[Any] = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(__UpperCAmelCase , __UpperCAmelCase )
        lowerCAmelCase__ : List[str] = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
        blue_text.next_to(__UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(__UpperCAmelCase )
        # Step 1 caption, then the Disk group (two columns of small "meta" cells).
        lowerCAmelCase__ : str = MarkupText(
            f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        lowerCAmelCase__ : Optional[Any] = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase__ : Dict = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase__ : Union[str, Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : Dict = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : str = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : List[str] = Text('''Disk''' , font_size=24 )
        lowerCAmelCase__ : Any = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        disk.move_to([-4.0, -1.25, 0] )
        self.play(Write(__UpperCAmelCase , run_time=3 ) , Write(__UpperCAmelCase , run_time=1 ) , Create(__UpperCAmelCase , run_time=1 ) )
        # Animate the checkpoint cells shrinking onto the disk column.
        lowerCAmelCase__ : str = []
        for i, rect in enumerate(__UpperCAmelCase ):
            lowerCAmelCase__ : Dict = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
            animations.append(MoveToTarget(__UpperCAmelCase , run_time=1.5 ) )
        self.play(*__UpperCAmelCase )
        self.play(FadeOut(__UpperCAmelCase ) )
        # Step 2 caption, then fade everything out.
        lowerCAmelCase__ : int = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__UpperCAmelCase , run_time=3 ) )
        self.play(
            FadeOut(__UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase ) , )
        self.wait()
678
0
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union

# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
    CONFIG_MAPPING_NAMES,
    AutoConfig,
    model_type_to_module_name,
    replace_list_option_in_docstrings,
)

logger = logging.get_logger(__name__)

# Maps a `model_type` string to the class name of its image processor.
# NOTE: a duplicate ('mobilevit', ...) entry was removed; OrderedDict would
# have silently collapsed it anyway.
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
    [
        ("align", "EfficientNetImageProcessor"),
        ("beit", "BeitImageProcessor"),
        ("bit", "BitImageProcessor"),
        ("blip", "BlipImageProcessor"),
        ("blip-2", "BlipImageProcessor"),
        ("bridgetower", "BridgeTowerImageProcessor"),
        ("chinese_clip", "ChineseCLIPImageProcessor"),
        ("clip", "CLIPImageProcessor"),
        ("clipseg", "ViTImageProcessor"),
        ("conditional_detr", "ConditionalDetrImageProcessor"),
        ("convnext", "ConvNextImageProcessor"),
        ("convnextv2", "ConvNextImageProcessor"),
        ("cvt", "ConvNextImageProcessor"),
        ("data2vec-vision", "BeitImageProcessor"),
        ("deformable_detr", "DeformableDetrImageProcessor"),
        ("deit", "DeiTImageProcessor"),
        ("deta", "DetaImageProcessor"),
        ("detr", "DetrImageProcessor"),
        ("dinat", "ViTImageProcessor"),
        ("donut-swin", "DonutImageProcessor"),
        ("dpt", "DPTImageProcessor"),
        ("efficientformer", "EfficientFormerImageProcessor"),
        ("efficientnet", "EfficientNetImageProcessor"),
        ("flava", "FlavaImageProcessor"),
        ("focalnet", "BitImageProcessor"),
        ("git", "CLIPImageProcessor"),
        ("glpn", "GLPNImageProcessor"),
        ("groupvit", "CLIPImageProcessor"),
        ("imagegpt", "ImageGPTImageProcessor"),
        ("instructblip", "BlipImageProcessor"),
        ("layoutlmv2", "LayoutLMv2ImageProcessor"),
        ("layoutlmv3", "LayoutLMv3ImageProcessor"),
        ("levit", "LevitImageProcessor"),
        ("mask2former", "Mask2FormerImageProcessor"),
        ("maskformer", "MaskFormerImageProcessor"),
        ("mgp-str", "ViTImageProcessor"),
        ("mobilenet_v1", "MobileNetV1ImageProcessor"),
        ("mobilenet_v2", "MobileNetV2ImageProcessor"),
        ("mobilevit", "MobileViTImageProcessor"),
        ("mobilevitv2", "MobileViTImageProcessor"),
        ("nat", "ViTImageProcessor"),
        ("oneformer", "OneFormerImageProcessor"),
        ("owlvit", "OwlViTImageProcessor"),
        ("perceiver", "PerceiverImageProcessor"),
        ("pix2struct", "Pix2StructImageProcessor"),
        ("poolformer", "PoolFormerImageProcessor"),
        ("regnet", "ConvNextImageProcessor"),
        ("resnet", "ConvNextImageProcessor"),
        ("sam", "SamImageProcessor"),
        ("segformer", "SegformerImageProcessor"),
        ("swiftformer", "ViTImageProcessor"),
        ("swin", "ViTImageProcessor"),
        ("swin2sr", "Swin2SRImageProcessor"),
        ("swinv2", "ViTImageProcessor"),
        ("table-transformer", "DetrImageProcessor"),
        ("timesformer", "VideoMAEImageProcessor"),
        ("tvlt", "TvltImageProcessor"),
        ("upernet", "SegformerImageProcessor"),
        ("van", "ConvNextImageProcessor"),
        ("videomae", "VideoMAEImageProcessor"),
        ("vilt", "ViltImageProcessor"),
        ("vit", "ViTImageProcessor"),
        ("vit_hybrid", "ViTHybridImageProcessor"),
        ("vit_mae", "ViTImageProcessor"),
        ("vit_msn", "ViTImageProcessor"),
        ("xclip", "CLIPImageProcessor"),
        ("yolos", "YolosImageProcessor"),
    ]
)

IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)


def image_processor_class_from_name(class_name: str):
    """Resolve an image-processor class object from its class name.

    Searches, in order: the known model modules listed in
    ``IMAGE_PROCESSOR_MAPPING_NAMES``, any extra classes registered at runtime on
    ``IMAGE_PROCESSOR_MAPPING``, and finally the main ``transformers`` init (which
    holds dummy objects when an optional dependency is missing). Returns ``None``
    if nothing matches.
    """
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                # The class may be shared by several model types; keep scanning.
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the
    # main init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None


def get_image_processor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Load the image-processor configuration of a pretrained model as a dict.

    Downloads (or reads from cache / local dir) the ``IMAGE_PROCESSOR_NAME`` file
    of the checkpoint. Returns an empty dict when the file cannot be located, so
    callers can fall back to the model config.
    """
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)


class AutoImageProcessor:
    """Factory class that instantiates the correct image processor for a checkpoint.

    Never instantiated directly — use :meth:`from_pretrained`.
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate the image processor class matching the checkpoint.

        Resolution order: explicit `image_processor_type` in the image processor
        config, an `AutoImageProcessor` entry in its `auto_map`, a legacy feature
        extractor config, then the model config / `IMAGE_PROCESSOR_MAPPING`.
        """
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        # Tells downstream loaders the call originates from an Auto class.
        kwargs["_from_auto"] = True

        # get_image_processor_dict returns (config_dict, remaining_kwargs).
        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            # code_revision only applies to the dynamic-module fetch above.
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, image_processor_class):
        """Register a new (config class -> image processor class) pair on the mapping."""
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
64
import collections
import os
import re
from pathlib import Path

PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Return the normalized backend name(s) tested by an `if not is_xxx_available()` line.

    Multiple backends on one line are sorted and joined with "_and_".
    Returns None when the line is not a backend test.
    """
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """Parse an __init__.py and return the objects declared per backend.

    Returns a pair of dicts (``_import_structure`` side, ``TYPE_CHECKING`` side),
    each mapping a backend name (or "none") to the list of object names, or
    ``None`` for a traditional init with no ``_import_structure``.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of an init and return a list of error strings (empty if consistent)."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """Walk the transformers source tree and raise if any __init__.py is inconsistent."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """Return the list of transformers submodule dotted names found on disk."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            # Only keep top-level modules here; packages are handled above.
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]


def check_submodules():
    """Raise if a submodule on disk is not registered in the main transformers init."""
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registed in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
678
0
"""simple docstring""" A : Optional[Any] = [ [0, 1_6, 1_3, 0, 0, 0], [0, 0, 1_0, 1_2, 0, 0], [0, 4, 0, 0, 1_4, 0], [0, 0, 9, 0, 0, 2_0], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = [False] * len(_UpperCamelCase ) __lowerCAmelCase = [s] __lowerCAmelCase = True while queue: __lowerCAmelCase = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(_UpperCamelCase ) __lowerCAmelCase = True __lowerCAmelCase = u return visited[t] def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = [-1] * (len(_UpperCamelCase )) __lowerCAmelCase = 0 __lowerCAmelCase = [] __lowerCAmelCase = [i[:] for i in graph] # Record original cut, copy. while bfs(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase = float("Inf" ) __lowerCAmelCase = sink while s != source: # Find the minimum value in select path __lowerCAmelCase = min(_UpperCamelCase , graph[parent[s]][s] ) __lowerCAmelCase = parent[s] max_flow += path_flow __lowerCAmelCase = sink while v != source: __lowerCAmelCase = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow __lowerCAmelCase = parent[v] for i in range(len(_UpperCamelCase ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
636
import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class _lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=64 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ): lowerCAmelCase__ : Optional[int] = parent lowerCAmelCase__ : Tuple = batch_size lowerCAmelCase__ : Union[str, Any] = seq_length lowerCAmelCase__ : str = is_training lowerCAmelCase__ : Union[str, Any] = use_input_mask lowerCAmelCase__ : List[Any] = use_token_type_ids lowerCAmelCase__ : int = use_labels lowerCAmelCase__ : List[Any] = vocab_size lowerCAmelCase__ : Optional[int] = hidden_size lowerCAmelCase__ : List[str] = embedding_size lowerCAmelCase__ : Optional[int] = num_hidden_layers lowerCAmelCase__ : Optional[int] = num_attention_heads lowerCAmelCase__ : List[str] = intermediate_size 
lowerCAmelCase__ : Tuple = hidden_act lowerCAmelCase__ : Union[str, Any] = hidden_dropout_prob lowerCAmelCase__ : Optional[int] = attention_probs_dropout_prob lowerCAmelCase__ : Union[str, Any] = max_position_embeddings lowerCAmelCase__ : List[Any] = type_vocab_size lowerCAmelCase__ : Optional[Any] = type_sequence_label_size lowerCAmelCase__ : List[Any] = initializer_range lowerCAmelCase__ : Optional[Any] = num_labels lowerCAmelCase__ : List[str] = num_choices lowerCAmelCase__ : Any = scope def __magic_name__( self ): lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase__ : str = None if self.use_input_mask: lowerCAmelCase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase__ : Optional[Any] = None if self.use_token_type_ids: lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase__ : Dict = None lowerCAmelCase__ : Dict = None lowerCAmelCase__ : Optional[int] = None if self.use_labels: lowerCAmelCase__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase__ : Union[str, Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __magic_name__( self ): return MegatronBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , 
type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Union[str, Any] = MegatronBertModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : List[Any] = MegatronBertForMaskedLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Optional[Any] = MegatronBertForCausalLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __magic_name__( self , 
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : str = MegatronBertForNextSentencePrediction(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Dict = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : str = MegatronBertForPreTraining(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : int = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , next_sentence_label=__UpperCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : str = MegatronBertForQuestionAnswering(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Union[str, Any] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : 
Optional[Any] = self.num_labels lowerCAmelCase__ : Union[str, Any] = MegatronBertForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Optional[Any] = self.num_labels lowerCAmelCase__ : str = MegatronBertForTokenClassification(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Optional[Any] = self.num_choices lowerCAmelCase__ : Dict = MegatronBertForMultipleChoice(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase__ : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase__ : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase__ : Any = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __magic_name__( self ): lowerCAmelCase__ : Union[str, Any] = self.prepare_config_and_inputs() ( ( lowerCAmelCase__ ) , ( 
lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ) : Optional[int] = config_and_inputs lowerCAmelCase__ : int = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class _lowerCAmelCase ( _lowercase , _lowercase , unittest.TestCase ): A__ = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if is_torch_available() else () ) A__ = ( { 'feature-extraction': MegatronBertModel, 'fill-mask': MegatronBertForMaskedLM, 'question-answering': MegatronBertForQuestionAnswering, 'text-classification': MegatronBertForSequenceClassification, 'text-generation': MegatronBertForCausalLM, 'token-classification': MegatronBertForTokenClassification, 'zero-shot': MegatronBertForSequenceClassification, } if is_torch_available() else {} ) A__ = True # test_resize_embeddings = False A__ = False def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ): lowerCAmelCase__ : List[Any] = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase ) if return_labels: if model_class in get_values(__UpperCAmelCase ): lowerCAmelCase__ : Optional[int] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase ) return inputs_dict def __magic_name__( self ): lowerCAmelCase__ : str = MegatronBertModelTester(self ) lowerCAmelCase__ : Dict = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def __magic_name__( self ): 
self.config_tester.run_common_tests() def __magic_name__( self ): lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_question_answering(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*__UpperCAmelCase ) def __lowerCAmelCase ( UpperCamelCase ) -> Optional[int]: return torch.tensor( UpperCamelCase , dtype=torch.long , device=UpperCamelCase , ) lowerCAmelCase_ = 1e-4 @require_torch @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( unittest.TestCase ): @slow @unittest.skip('''Model is not available.''' ) def __magic_name__( self ): lowerCAmelCase__ : int = '''nvidia/megatron-bert-uncased-345m''' if "MYDIR" in 
os.environ: lowerCAmelCase__ : Union[str, Any] = os.path.join(os.environ['''MYDIR'''] , __UpperCAmelCase ) lowerCAmelCase__ : Tuple = MegatronBertModel.from_pretrained(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.half() lowerCAmelCase__ : Optional[int] = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] ) with torch.no_grad(): lowerCAmelCase__ : List[Any] = model(__UpperCAmelCase )[0] lowerCAmelCase__ : List[Any] = torch.Size((1, 9, 1024) ) self.assertEqual(output.shape , __UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728] for ii in range(3 ): for jj in range(3 ): lowerCAmelCase__ : Union[str, Any] = output[0, ii, jj] lowerCAmelCase__ : Optional[Any] = expected[3 * ii + jj] lowerCAmelCase__ : List[str] = '''ii={} jj={} a={} b={}'''.format(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) self.assertTrue(math.isclose(__UpperCAmelCase , __UpperCAmelCase , rel_tol=__UpperCAmelCase , abs_tol=__UpperCAmelCase ) , msg=__UpperCAmelCase )
678
0
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version _lowercase : List[Any] = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt') @dataclass class _UpperCAmelCase : a__ : Optional[int] = field( default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} ) a__ : Optional[Any] = field( default=_lowercase , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) a__ : List[Any] = field( default=_lowercase , metadata={"help": "The column name of the images in the files."} ) a__ : Tuple = field(default=_lowercase , metadata={"help": "A folder containing the training data."} ) a__ : Union[str, Any] = field(default=_lowercase , metadata={"help": "A folder containing the validation data."} ) a__ : Union[str, Any] = field( default=0.15 , metadata={"help": "Percent to split off of train for validation."} ) a__ : Tuple = field( default=_lowercase , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." 
) } , ) a__ : Optional[Any] = field( default=_lowercase , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) def a ( self : Tuple ): __UpperCAmelCase = {} if self.train_dir is not None: __UpperCAmelCase = self.train_dir if self.validation_dir is not None: __UpperCAmelCase = self.validation_dir __UpperCAmelCase = data_files if data_files else None @dataclass class _UpperCAmelCase : a__ : List[str] = field( default=_lowercase , metadata={ "help": ( "The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch." ) } , ) a__ : str = field( default=_lowercase , metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"} ) a__ : Optional[Any] = field( default=_lowercase , metadata={ "help": ( "Override some existing default config settings when a model is trained from scratch. Example: " "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" ) } , ) a__ : Dict = field( default=_lowercase , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) a__ : List[Any] = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) a__ : int = field(default=_lowercase , metadata={"help": "Name or path of preprocessor config."} ) a__ : Optional[Any] = field( default=_lowercase , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." 
) } , ) a__ : Dict = field( default=0.75 , metadata={"help": "The ratio of the number of masked tokens in the input sequence."} ) a__ : int = field( default=_lowercase , metadata={"help": "Whether or not to train with normalized pixel values as target."} ) @dataclass class _UpperCAmelCase ( _lowercase ): a__ : Union[str, Any] = field( default=1E-3 , metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."} ) def lowercase__ ( snake_case_ :List[Any] ): __UpperCAmelCase = torch.stack([example['''pixel_values'''] for example in examples] ) return {"pixel_values": pixel_values} def lowercase__ ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __UpperCAmelCase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_mae''' , snake_case_ , snake_case_ ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() __UpperCAmelCase = training_args.get_process_log_level() logger.setLevel(snake_case_ ) transformers.utils.logging.set_verbosity(snake_case_ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. __UpperCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __UpperCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Initialize our dataset. __UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. 
__UpperCAmelCase = None if '''validation''' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , snake_case_ ) and data_args.train_val_split > 0.0: __UpperCAmelCase = ds['''train'''].train_test_split(data_args.train_val_split ) __UpperCAmelCase = split['''train'''] __UpperCAmelCase = split['''test'''] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __UpperCAmelCase = { '''cache_dir''': model_args.cache_dir, '''revision''': model_args.model_revision, '''use_auth_token''': True if model_args.use_auth_token else None, } if model_args.config_name: __UpperCAmelCase = ViTMAEConfig.from_pretrained(model_args.config_name , **snake_case_ ) elif model_args.model_name_or_path: __UpperCAmelCase = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **snake_case_ ) else: __UpperCAmelCase = ViTMAEConfig() logger.warning('''You are instantiating a new config instance from scratch.''' ) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # adapt config config.update( { '''mask_ratio''': model_args.mask_ratio, '''norm_pix_loss''': model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: __UpperCAmelCase = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **snake_case_ ) elif model_args.model_name_or_path: __UpperCAmelCase = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **snake_case_ ) else: __UpperCAmelCase = ViTImageProcessor() # create model if model_args.model_name_or_path: __UpperCAmelCase = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=snake_case_ , cache_dir=model_args.cache_dir , 
revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('''Training new model from scratch''' ) __UpperCAmelCase = ViTMAEForPreTraining(snake_case_ ) if training_args.do_train: __UpperCAmelCase = ds['''train'''].column_names else: __UpperCAmelCase = ds['''validation'''].column_names if data_args.image_column_name is not None: __UpperCAmelCase = data_args.image_column_name elif "image" in column_names: __UpperCAmelCase = '''image''' elif "img" in column_names: __UpperCAmelCase = '''img''' else: __UpperCAmelCase = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: __UpperCAmelCase = image_processor.size['''shortest_edge'''] else: __UpperCAmelCase = (image_processor.size['''height'''], image_processor.size['''width''']) __UpperCAmelCase = Compose( [ Lambda(lambda snake_case_ : img.convert('''RGB''' ) if img.mode != "RGB" else img ), RandomResizedCrop(snake_case_ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(snake_case_ :Tuple ): __UpperCAmelCase = [transforms(snake_case_ ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError('''--do_train requires a train dataset''' ) if data_args.max_train_samples is not None: __UpperCAmelCase = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(snake_case_ ) if training_args.do_eval: if "validation" not in ds: raise ValueError('''--do_eval requires a validation dataset''' ) if data_args.max_eval_samples is not None: __UpperCAmelCase = ( ds['''validation'''].shuffle(seed=training_args.seed 
).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(snake_case_ ) # Compute absolute learning rate __UpperCAmelCase = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: __UpperCAmelCase = training_args.base_learning_rate * total_train_batch_size / 256 # Initialize our trainer __UpperCAmelCase = Trainer( model=snake_case_ , args=snake_case_ , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=snake_case_ , data_collator=snake_case_ , ) # Training if training_args.do_train: __UpperCAmelCase = None if training_args.resume_from_checkpoint is not None: __UpperCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: __UpperCAmelCase = last_checkpoint __UpperCAmelCase = trainer.train(resume_from_checkpoint=snake_case_ ) trainer.save_model() trainer.log_metrics('''train''' , train_result.metrics ) trainer.save_metrics('''train''' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: __UpperCAmelCase = trainer.evaluate() trainer.log_metrics('''eval''' , snake_case_ ) trainer.save_metrics('''eval''' , snake_case_ ) # Write model card and (optionally) push to hub __UpperCAmelCase = { '''tasks''': '''masked-auto-encoding''', '''dataset''': data_args.dataset_name, '''tags''': ['''masked-auto-encoding'''], } if training_args.push_to_hub: trainer.push_to_hub(**snake_case_ ) else: trainer.create_model_card(**snake_case_ ) def lowercase__ ( snake_case_ :Optional[int] ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
49
"""BART model configuration and its ONNX export configuration."""

import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging


logger = logging.get_logger(__name__)

# Canonical checkpoint -> hosted config file.
BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
    # See all BART models at https://huggingface.co/models?filter=bart
}


class BartConfig(PretrainedConfig):
    """Configuration for a BART encoder/decoder model.

    Stores the hyper-parameters of the encoder and decoder stacks and the
    embedding sizes.  Instantiating with the defaults yields a configuration
    similar to ``facebook/bart-large``.
    """

    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Generic attribute names used by the base class, mapped onto BART's names.
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        # The previous (obfuscated) version bound every argument to a throwaway
        # local instead of an instance attribute; the attribute writes are
        # restored here so the config actually carries its hyper-parameters.
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )


class BartOnnxConfig(OnnxSeqaSeqConfigWithPast):
    """ONNX export configuration for BART (default, seq2seq-lm and causal-lm tasks)."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis description of the model inputs for the configured task."""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                # With a past, the decoder is fed one token at a time.
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis description of the model outputs for the configured task."""
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            # Skip the seq2seq-specific handling and use the plain with-past outputs.
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build dummy encoder+decoder inputs (and, if enabled, past key/values)."""
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs: a single token when a past is provided.
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build dummy decoder-only inputs (and, if enabled, past key/values)."""
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build plain tokenizer dummy inputs for encoder-style tasks."""
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Dispatch dummy-input generation to the task-specific helper."""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        # Seq2seq tasks use the specialized flattening; other tasks fall back to
        # the plain with-past behaviour of the grandparent class.
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeqaSeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
678
0
"""Shared pytest fixtures: sample datasets and on-disk files in many formats."""

import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile

import pyarrow as pa
import pyarrow.parquet as pq
import pytest

import datasets
import datasets.config


@pytest.fixture(scope="session")
def dataset():
    """An in-memory 10-row Dataset with sequence, class-label and nested features."""
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset


@pytest.fixture(scope="session")
def arrow_file(tmp_path_factory, dataset):
    """Materialize the `dataset` fixture as an Arrow cache file and return its path."""
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename


# FILE_CONTENT + files
FILE_CONTENT = """\
    Text data.
    Second line of data."""
@pytest.fixture(scope="session")
def text_file(tmp_path_factory):
    """Plain-text file containing FILE_CONTENT."""
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename


@pytest.fixture(scope="session")
def bz2_file(tmp_path_factory):
    """bzip2-compressed copy of FILE_CONTENT."""
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def gz_file(tmp_path_factory):
    """gzip-compressed copy of FILE_CONTENT."""
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def lz4_file(tmp_path_factory):
    """lz4-compressed copy of FILE_CONTENT (None if lz4 is unavailable)."""
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def seven_zip_file(tmp_path_factory, text_file):
    """7z archive containing the text file (None if py7zr is unavailable)."""
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path


@pytest.fixture(scope="session")
def tar_file(tmp_path_factory, text_file):
    """tar archive containing the text file."""
    import tarfile

    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def xz_file(tmp_path_factory):
    """xz/lzma-compressed copy of FILE_CONTENT."""
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_file(tmp_path_factory, text_file):
    """zip archive containing the text file."""
    import zipfile

    path = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def zstd_file(tmp_path_factory):
    """zstandard-compressed copy of FILE_CONTENT (None if zstandard is unavailable)."""
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp("data") / "file.txt.zst"
        data = bytes(FILE_CONTENT, "utf-8")
        with zstd.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def xml_file(tmp_path_factory):
    """Small TMX (XML) translation-memory file with 5 translation units."""
    filename = tmp_path_factory.mktemp("data") / "file.xml"
    data = textwrap.dedent(
        """\
    <?xml version="1.0" encoding="UTF-8" ?>
    <tmx version="1.4">
      <header segtype="sentence" srclang="ca" />
      <body>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
          <tuv xml:lang="en"><seg>Content 1</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
          <tuv xml:lang="en"><seg>Content 2</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
          <tuv xml:lang="en"><seg>Content 3</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
          <tuv xml:lang="en"><seg>Content 4</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
          <tuv xml:lang="en"><seg>Content 5</seg></tuv>
        </tu>
      </body>
    </tmx>"""
    )
    with open(filename, "w") as f:
        f.write(data)
    return filename


# Tabular sample data shared by the format fixtures below.
DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
# Same rows as DATA[:2] but with a different key order (column-order tests).
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]


@pytest.fixture(scope="session")
def dataset_dict():
    return DATA_DICT_OF_LISTS


@pytest.fixture(scope="session")
def arrow_path(tmp_path_factory):
    """DATA_DICT_OF_LISTS serialized to an Arrow cache file."""
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=path)
    return path


@pytest.fixture(scope="session")
def sqlite_path(tmp_path_factory):
    """SQLite database with the DATA rows in a `dataset` table."""
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path


@pytest.fixture(scope="session")
def csv_path(tmp_path_factory):
    """CSV file with the DATA rows."""
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def csv2_path(tmp_path_factory):
    """Second CSV file with the same DATA rows (multi-file loading tests)."""
    path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def bz2_csv_path(csv_path, tmp_path_factory):
    """bzip2-compressed copy of the CSV file."""
    import bz2

    path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
    with open(csv_path, "rb") as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
    """zip archive containing both CSV files at the archive root."""
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csv2_path, arcname=os.path.basename(csv2_path))
    return path


@pytest.fixture(scope="session")
def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory):
    """zip archive whose members use an uppercase .CSV extension."""
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV")))
        f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV")))
    return path


@pytest.fixture(scope="session")
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
    """zip archive containing both CSV files under a main_dir/ prefix."""
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path)))
        f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path)))
    return path


@pytest.fixture(scope="session")
def parquet_path(tmp_path_factory):
    """Parquet file with the DATA rows."""
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict(
            {k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema
        )
        writer.write_table(pa_table)
        writer.close()
    return path


@pytest.fixture(scope="session")
def json_list_of_dicts_path(tmp_path_factory):
    """JSON file shaped as {"data": [row, ...]}."""
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def json_dict_of_lists_path(tmp_path_factory):
    """JSON file shaped as {"data": {col: [values]}}."""
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA_DICT_OF_LISTS}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def jsonl_path(tmp_path_factory):
    """JSON-lines file with the DATA rows."""
    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl2_path(tmp_path_factory):
    """Second JSON-lines file with the same DATA rows."""
    path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_312_path(tmp_path_factory):
    """JSON-lines file whose rows have a shuffled key order (DATA_312)."""
    path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
    with open(path, "w") as f:
        for item in DATA_312:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_str_path(tmp_path_factory):
    """JSON-lines file with string-valued col_1 rows (DATA_STR)."""
    path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    with open(path, "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def text_gz_path(tmp_path_factory, text_path):
    """gzip-compressed copy of the text dataset file."""
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(text_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    """gzip-compressed copy of the JSON-lines file."""
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(jsonl_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    """zip archive containing both JSON-lines files at the root."""
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    """zip archive containing another zip under a nested/ prefix."""
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    """zip archive containing both JSON-lines files under main_dir/."""
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
    return path


@pytest.fixture(scope="session")
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    """tar archive containing both JSON-lines files."""
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    """tar archive containing another tar under a nested/ prefix."""
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def text_path(tmp_path_factory):
    """Text dataset file with one value per line."""
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def text2_path(tmp_path_factory):
    """Second text dataset file with the same lines."""
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def abc_path(tmp_path_factory):
    """Text file with an unrecognized .abc extension."""
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def zip_text_path(text_path, text2_path, tmp_path_factory):
    """zip archive containing both text files at the root."""
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path


@pytest.fixture(scope="session")
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    """zip archive containing both text files under main_dir/."""
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
    return path


@pytest.fixture(scope="session")
def zip_unsupported_ext_path(abc_path, text_path, tmp_path_factory):
    """zip archive whose members have unsupported extensions."""
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(abc_path, arcname=os.path.basename("unsupported.ext"))
        f.write(text_path, arcname=os.path.basename("unsupported_2.ext"))
    return path


@pytest.fixture(scope="session")
def text_path_with_unicode_new_lines(tmp_path_factory):
    """Text file containing a U+2029 (paragraph separator) inside one line."""
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path


@pytest.fixture(scope="session")
def image_file():
    return os.path.join("tests", "features", "data", "test_image_rgb.jpg")


@pytest.fixture(scope="session")
def audio_file():
    return os.path.join("tests", "features", "data", "test_audio_44100.wav")


@pytest.fixture(scope="session")
def zip_image_path(image_file, tmp_path_factory):
    """zip archive containing the sample image twice under different names."""
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
    return path


@pytest.fixture(scope="session")
def data_dir_with_hidden_files(tmp_path_factory):
    """Data directory with visible train/test files plus hidden file and directory."""
    data_dir = tmp_path_factory.mktemp("data_dir")

    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)

    return data_dir
447
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { """asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""", # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class _lowerCAmelCase ( _lowercase ): A__ = 'sew-d' def __init__( self , __UpperCAmelCase=32 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase=2 , __UpperCAmelCase=512 , __UpperCAmelCase=256 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=("p2c", "c2p") , __UpperCAmelCase="layer_norm" , __UpperCAmelCase="gelu_python" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-7 , __UpperCAmelCase=1e-5 , __UpperCAmelCase="group" , __UpperCAmelCase="gelu" , __UpperCAmelCase=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , __UpperCAmelCase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __UpperCAmelCase=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __UpperCAmelCase=False , __UpperCAmelCase=128 , __UpperCAmelCase=16 , __UpperCAmelCase=True , __UpperCAmelCase=0.05 , __UpperCAmelCase=10 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0 , __UpperCAmelCase=10 , __UpperCAmelCase=0 , __UpperCAmelCase="mean" , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=256 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , **__UpperCAmelCase , ): super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = hidden_size lowerCAmelCase__ : Optional[int] = feat_extract_norm lowerCAmelCase__ : str = feat_extract_activation lowerCAmelCase__ : int = list(__UpperCAmelCase ) lowerCAmelCase__ : int = list(__UpperCAmelCase ) 
lowerCAmelCase__ : Any = list(__UpperCAmelCase ) lowerCAmelCase__ : int = conv_bias lowerCAmelCase__ : List[Any] = num_conv_pos_embeddings lowerCAmelCase__ : Optional[int] = num_conv_pos_embedding_groups lowerCAmelCase__ : int = len(self.conv_dim ) lowerCAmelCase__ : Union[str, Any] = num_hidden_layers lowerCAmelCase__ : Any = intermediate_size lowerCAmelCase__ : int = squeeze_factor lowerCAmelCase__ : int = max_position_embeddings lowerCAmelCase__ : Any = position_buckets lowerCAmelCase__ : Optional[int] = share_att_key lowerCAmelCase__ : Tuple = relative_attention lowerCAmelCase__ : Optional[int] = norm_rel_ebd lowerCAmelCase__ : Tuple = list(__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = hidden_act lowerCAmelCase__ : Any = num_attention_heads lowerCAmelCase__ : Optional[int] = hidden_dropout lowerCAmelCase__ : Union[str, Any] = attention_dropout lowerCAmelCase__ : str = activation_dropout lowerCAmelCase__ : List[Any] = feat_proj_dropout lowerCAmelCase__ : Any = final_dropout lowerCAmelCase__ : Optional[int] = layer_norm_eps lowerCAmelCase__ : List[str] = feature_layer_norm_eps lowerCAmelCase__ : Tuple = initializer_range lowerCAmelCase__ : Tuple = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect.''' '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,''' f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)""" f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowerCAmelCase__ : Tuple = apply_spec_augment lowerCAmelCase__ : List[str] = mask_time_prob lowerCAmelCase__ : int = mask_time_length lowerCAmelCase__ : int = mask_time_min_masks 
lowerCAmelCase__ : Optional[int] = mask_feature_prob lowerCAmelCase__ : int = mask_feature_length lowerCAmelCase__ : int = mask_feature_min_masks # ctc loss lowerCAmelCase__ : Optional[Any] = ctc_loss_reduction lowerCAmelCase__ : Any = ctc_zero_infinity # sequence classification lowerCAmelCase__ : Tuple = use_weighted_layer_sum lowerCAmelCase__ : Dict = classifier_proj_size @property def __magic_name__( self ): return functools.reduce(operator.mul , self.conv_stride , 1 )
678
0
'''simple docstring''' import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE_: Optional[Any] =logging.get_logger(__name__) SCREAMING_SNAKE_CASE_: Tuple =['model.decoder.embed_positions.weights'] def lowerCAmelCase_ ( snake_case_ : int ) -> Tuple: '''simple docstring''' if "emb" in name: UpperCAmelCase_ = name.replace("emb" , "model.decoder.embed_tokens" ) if "transformer" in name: UpperCAmelCase_ = name.replace("transformer" , "model.decoder" ) if "cross_attention" in name: UpperCAmelCase_ = name.replace("cross_attention" , "encoder_attn" ) if "linear1" in name: UpperCAmelCase_ = name.replace("linear1" , "fc1" ) if "linear2" in name: UpperCAmelCase_ = name.replace("linear2" , "fc2" ) if "norm1" in name: UpperCAmelCase_ = name.replace("norm1" , "self_attn_layer_norm" ) if "norm_cross" in name: UpperCAmelCase_ = name.replace("norm_cross" , "encoder_attn_layer_norm" ) if "norm2" in name: UpperCAmelCase_ = name.replace("norm2" , "final_layer_norm" ) if "out_norm" in name: UpperCAmelCase_ = name.replace("out_norm" , "model.decoder.layer_norm" ) if "linears" in name: UpperCAmelCase_ = name.replace("linears" , "lm_heads" ) if "condition_provider.conditioners.description.output_proj" in name: UpperCAmelCase_ = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" ) return name def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Any ) -> Tuple[Dict, Dict]: '''simple docstring''' UpperCAmelCase_ = list(state_dict.keys() ) UpperCAmelCase_ = {} for key in keys: UpperCAmelCase_ = state_dict.pop(snake_case_ ) 
UpperCAmelCase_ = rename_keys(snake_case_ ) if "in_proj_weight" in key: # split fused qkv proj UpperCAmelCase_ = val[:hidden_size, :] UpperCAmelCase_ = val[hidden_size : 2 * hidden_size, :] UpperCAmelCase_ = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: UpperCAmelCase_ = val else: UpperCAmelCase_ = val return state_dict, enc_dec_proj_state_dict def lowerCAmelCase_ ( snake_case_ : int ) -> MusicgenDecoderConfig: '''simple docstring''' if checkpoint == "small": # default config values UpperCAmelCase_ = 10_24 UpperCAmelCase_ = 24 UpperCAmelCase_ = 16 elif checkpoint == "medium": UpperCAmelCase_ = 15_36 UpperCAmelCase_ = 48 UpperCAmelCase_ = 24 elif checkpoint == "large": UpperCAmelCase_ = 20_48 UpperCAmelCase_ = 48 UpperCAmelCase_ = 32 else: raise ValueError(f"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" ) UpperCAmelCase_ = MusicgenDecoderConfig( hidden_size=snake_case_ , ffn_dim=hidden_size * 4 , num_hidden_layers=snake_case_ , num_attention_heads=snake_case_ , ) return config @torch.no_grad() def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : Union[str, Any]=None , snake_case_ : Optional[Any]=None , snake_case_ : Dict="cpu" ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase_ = MusicGen.get_pretrained(snake_case_ , device=snake_case_ ) UpperCAmelCase_ = decoder_config_from_checkpoint(snake_case_ ) UpperCAmelCase_ = fairseq_model.lm.state_dict() UpperCAmelCase_ = rename_state_dict( snake_case_ , hidden_size=decoder_config.hidden_size ) UpperCAmelCase_ = TaEncoderModel.from_pretrained("t5-base" ) UpperCAmelCase_ = EncodecModel.from_pretrained("facebook/encodec_32khz" ) UpperCAmelCase_ = MusicgenForCausalLM(snake_case_ ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection UpperCAmelCase_ = decoder.load_state_dict(snake_case_ , strict=snake_case_ ) for key in missing_keys.copy(): if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS: 
missing_keys.remove(snake_case_ ) if len(snake_case_ ) > 0: raise ValueError(f"""Missing key(s) in state_dict: {missing_keys}""" ) if len(snake_case_ ) > 0: raise ValueError(f"""Unexpected key(s) in state_dict: {unexpected_keys}""" ) # init the composite model UpperCAmelCase_ = MusicgenForConditionalGeneration(text_encoder=snake_case_ , audio_encoder=snake_case_ , decoder=snake_case_ ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(snake_case_ ) # check we can do a forward pass UpperCAmelCase_ = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) UpperCAmelCase_ = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): UpperCAmelCase_ = model(input_ids=snake_case_ , decoder_input_ids=snake_case_ ).logits if logits.shape != (8, 1, 20_48): raise ValueError("Incorrect shape for logits" ) # now construct the processor UpperCAmelCase_ = AutoTokenizer.from_pretrained("t5-base" ) UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" ) UpperCAmelCase_ = MusicgenProcessor(feature_extractor=snake_case_ , tokenizer=snake_case_ ) # set the appropriate bos/pad token ids UpperCAmelCase_ = 20_48 UpperCAmelCase_ = 20_48 # set other default generation config params UpperCAmelCase_ = int(30 * audio_encoder.config.frame_rate ) UpperCAmelCase_ = True UpperCAmelCase_ = 3.0 if pytorch_dump_folder is not None: Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) logger.info(f"""Saving model {checkpoint} to {pytorch_dump_folder}""" ) model.save_pretrained(snake_case_ ) processor.save_pretrained(snake_case_ ) if repo_id: logger.info(f"""Pushing model {checkpoint} to {repo_id}""" ) model.push_to_hub(snake_case_ ) processor.push_to_hub(snake_case_ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_: int =argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint', default='small', type=str, help='Checkpoint size of the MusicGen model you\'d like to convert. 
Can be one of: `[\'small\', \'medium\', \'large\']`.', ) parser.add_argument( '--pytorch_dump_folder', required=True, default=None, type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.' ) parser.add_argument( '--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.' ) SCREAMING_SNAKE_CASE_: List[str] =parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
78
import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase_ = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""") @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( _lowercase , unittest.TestCase ): A__ = PegasusTokenizer A__ = PegasusTokenizerFast A__ = True A__ = True def __magic_name__( self ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ : Union[str, Any] = PegasusTokenizer(__UpperCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __magic_name__( self ): return PegasusTokenizer.from_pretrained('''google/pegasus-large''' ) def __magic_name__( self , **__UpperCAmelCase ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def __magic_name__( self , __UpperCAmelCase ): return ("This is a test", "This is a test") def __magic_name__( self ): lowerCAmelCase__ : Optional[Any] = '''</s>''' lowerCAmelCase__ : Optional[int] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Tuple = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<pad>''' ) self.assertEqual(vocab_keys[1] , '''</s>''' ) self.assertEqual(vocab_keys[-1] , '''v''' ) self.assertEqual(len(__UpperCAmelCase ) , 1103 ) def __magic_name__( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1103 ) def __magic_name__( self ): lowerCAmelCase__ : int = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ : Tuple = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ : int 
= ( '''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important''' ''' </s> <pad> <pad> <pad>''' ) lowerCAmelCase__ : Any = rust_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] lowerCAmelCase__ : Dict = py_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Any = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word lowerCAmelCase__ : List[str] = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.''' lowerCAmelCase__ : Tuple = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1] lowerCAmelCase__ : Tuple = tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Dict = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_6103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1024 lowerCAmelCase__ : str = '''To ensure a smooth flow of bank resolutions.''' lowerCAmelCase__ : int = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1] lowerCAmelCase__ : List[Any] = tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def __magic_name__( self ): lowerCAmelCase__ : Optional[int] = ['''This is going to be way too long.''' * 150, '''short example'''] 
lowerCAmelCase__ : List[str] = ['''not super long but more than 5 tokens''', '''tiny'''] lowerCAmelCase__ : Tuple = self._large_tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) lowerCAmelCase__ : Optional[int] = self._large_tokenizer( text_target=__UpperCAmelCase , max_length=5 , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 1024) assert batch.attention_mask.shape == (2, 1024) assert targets["input_ids"].shape == (2, 5) assert len(__UpperCAmelCase ) == 2 # input_ids, attention_mask. @slow def __magic_name__( self ): # fmt: off lowerCAmelCase__ : Optional[int] = {'''input_ids''': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCAmelCase , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , ) @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( _lowercase , unittest.TestCase ): A__ = PegasusTokenizer A__ = PegasusTokenizerFast A__ = True A__ = True def __magic_name__( self ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ : List[Any] = PegasusTokenizer(__UpperCAmelCase , offset=0 , mask_token_sent=__UpperCAmelCase , mask_token='''[MASK]''' ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __magic_name__( self ): return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' ) def __magic_name__( self , **__UpperCAmelCase ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def __magic_name__( self , __UpperCAmelCase ): return ("This is a test", "This is a test") def __magic_name__( self ): lowerCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ : int = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ : str = ( '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>''' ''' <pad> 
<pad> <pad>''' ) lowerCAmelCase__ : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] lowerCAmelCase__ : int = py_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) @require_torch def __magic_name__( self ): lowerCAmelCase__ : Optional[Any] = ['''This is going to be way too long.''' * 1000, '''short example'''] lowerCAmelCase__ : int = ['''not super long but more than 5 tokens''', '''tiny'''] lowerCAmelCase__ : Tuple = self._large_tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) lowerCAmelCase__ : Tuple = self._large_tokenizer( text_target=__UpperCAmelCase , max_length=5 , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 4096) assert batch.attention_mask.shape == (2, 4096) assert targets["input_ids"].shape == (2, 5) assert len(__UpperCAmelCase ) == 2 # input_ids, attention_mask. def __magic_name__( self ): lowerCAmelCase__ : List[str] = ( '''This is an example string that is used to test the original TF implementation against the HF''' ''' implementation''' ) lowerCAmelCase__ : Union[str, Any] = self._large_tokenizer(__UpperCAmelCase ).input_ids self.assertListEqual( __UpperCAmelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
678
0
from scipy.stats import pearsonr import datasets SCREAMING_SNAKE_CASE :str = ''' Pearson correlation coefficient and p-value for testing non-correlation. The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. ''' SCREAMING_SNAKE_CASE :List[str] = ''' Args: predictions (`list` of `int`): Predicted class labels, as returned by a model. references (`list` of `int`): Ground truth labels. return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`. Returns: pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation. p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities. Examples: Example 1-A simple example using only predictions and references. 
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\") >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5]) >>> print(round(results[\'pearsonr\'], 2)) -0.74 Example 2-The same as Example 1, but that also returns the `p-value`. >>> pearsonr_metric = datasets.load_metric(\"pearsonr\") >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True) >>> print(sorted(list(results.keys()))) [\'p-value\', \'pearsonr\'] >>> print(round(results[\'pearsonr\'], 2)) -0.74 >>> print(round(results[\'p-value\'], 2)) 0.15 ''' SCREAMING_SNAKE_CASE :Optional[int] = ''' @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, Ilhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Antonio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): """simple docstring""" def lowerCAmelCase__ ( self : Tuple ) -> str: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("float" ), "references": datasets.Value("float" ), } ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"] , ) def lowerCAmelCase__ ( self : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any]=False ) -> str: """simple docstring""" if return_pvalue: snake_case_ = pearsonr(__UpperCAmelCase , __UpperCAmelCase ) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(__UpperCAmelCase , __UpperCAmelCase )[0] )}
283
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { """naver-clova-ix/donut-base""": """https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json""", # See all Donut models at https://huggingface.co/models?filter=donut-swin } class _lowerCAmelCase ( _lowercase ): A__ = 'donut-swin' A__ = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self , __UpperCAmelCase=224 , __UpperCAmelCase=4 , __UpperCAmelCase=3 , __UpperCAmelCase=96 , __UpperCAmelCase=[2, 2, 6, 2] , __UpperCAmelCase=[3, 6, 12, 24] , __UpperCAmelCase=7 , __UpperCAmelCase=4.0 , __UpperCAmelCase=True , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase="gelu" , __UpperCAmelCase=False , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-5 , **__UpperCAmelCase , ): super().__init__(**__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = image_size lowerCAmelCase__ : List[str] = patch_size lowerCAmelCase__ : int = num_channels lowerCAmelCase__ : Optional[Any] = embed_dim lowerCAmelCase__ : int = depths lowerCAmelCase__ : Dict = len(__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = num_heads lowerCAmelCase__ : Dict = window_size lowerCAmelCase__ : str = mlp_ratio lowerCAmelCase__ : Optional[int] = qkv_bias lowerCAmelCase__ : Any = hidden_dropout_prob lowerCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob lowerCAmelCase__ : List[str] = drop_path_rate lowerCAmelCase__ : Tuple = hidden_act lowerCAmelCase__ : List[str] = use_absolute_embeddings lowerCAmelCase__ : Dict = layer_norm_eps lowerCAmelCase__ : Any = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model lowerCAmelCase__ : List[Any] = int(embed_dim * 2 ** (len(__UpperCAmelCase ) - 1) )
678
0
"""simple docstring""" import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase: str =logging.get_logger(__name__) lowerCAmelCase: str ={ "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json", } class lowerCamelCase__ ( _lowercase ): __UpperCAmelCase = """mvp""" __UpperCAmelCase = ["""past_key_values"""] __UpperCAmelCase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self , snake_case=5_0_2_6_7 , snake_case=1_0_2_4 , snake_case=1_2 , snake_case=4_0_9_6 , snake_case=1_6 , snake_case=1_2 , snake_case=4_0_9_6 , snake_case=1_6 , snake_case=0.0 , snake_case=0.0 , snake_case="gelu" , snake_case=1_0_2_4 , snake_case=0.1 , snake_case=0.0 , snake_case=0.0 , snake_case=0.02 , snake_case=0.0 , snake_case=False , snake_case=True , snake_case=1 , snake_case=0 , snake_case=2 , snake_case=True , snake_case=2 , snake_case=2 , snake_case=False , snake_case=1_0_0 , snake_case=8_0_0 , **snake_case , ) -> List[Any]: """simple docstring""" lowercase : Union[str, Any] = vocab_size lowercase : Any = max_position_embeddings lowercase : List[str] = d_model lowercase : Any = encoder_ffn_dim lowercase : str = encoder_layers lowercase : Union[str, Any] = encoder_attention_heads lowercase : Tuple = decoder_ffn_dim lowercase : Optional[int] = decoder_layers lowercase : List[Any] = decoder_attention_heads lowercase : Optional[int] = dropout lowercase : Any = attention_dropout lowercase : Optional[int] = activation_dropout lowercase : Union[str, Any] = activation_function lowercase : str = init_std lowercase : Dict = encoder_layerdrop lowercase : Optional[Any] = decoder_layerdrop lowercase : Optional[int] = classifier_dropout lowercase : Union[str, Any] = use_cache lowercase : str = encoder_layers lowercase : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True lowercase : Dict = use_prompt lowercase : Union[str, Any] = prompt_length 
lowercase : Union[str, Any] = prompt_mid_dim super().__init__( pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , forced_eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , ) if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , __UpperCAmelCase ): lowercase : Tuple = self.bos_token_id warnings.warn( f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. ''' """The config can simply be saved and uploaded again to be fixed.""" )
607
lowerCAmelCase_ = { """meter""": """m""", """kilometer""": """km""", """megametre""": """Mm""", """gigametre""": """Gm""", """terametre""": """Tm""", """petametre""": """Pm""", """exametre""": """Em""", """zettametre""": """Zm""", """yottametre""": """Ym""", } # Exponent of the factor(meter) lowerCAmelCase_ = { """m""": 0, """km""": 3, """Mm""": 6, """Gm""": 9, """Tm""": 12, """Pm""": 15, """Em""": 18, """Zm""": 21, """Ym""": 24, } def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> float: lowerCAmelCase__ : List[str] = from_type.lower().strip('''s''' ) lowerCAmelCase__ : List[str] = to_type.lower().strip('''s''' ) lowerCAmelCase__ : str = UNIT_SYMBOL.get(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : Any = UNIT_SYMBOL.get(UpperCamelCase , UpperCamelCase ) if from_sanitized not in METRIC_CONVERSION: lowerCAmelCase__ : Tuple = ( F"""Invalid 'from_type' value: {from_type!r}.\n""" F"""Conversion abbreviations are: {", ".join(UpperCamelCase )}""" ) raise ValueError(UpperCamelCase ) if to_sanitized not in METRIC_CONVERSION: lowerCAmelCase__ : List[Any] = ( F"""Invalid 'to_type' value: {to_type!r}.\n""" F"""Conversion abbreviations are: {", ".join(UpperCamelCase )}""" ) raise ValueError(UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = METRIC_CONVERSION[from_sanitized] lowerCAmelCase__ : Optional[Any] = METRIC_CONVERSION[to_sanitized] lowerCAmelCase__ : int = 1 if from_exponent > to_exponent: lowerCAmelCase__ : List[str] = from_exponent - to_exponent else: lowerCAmelCase__ : Dict = -(to_exponent - from_exponent) return value * pow(10 , UpperCamelCase ) if __name__ == "__main__": from doctest import testmod testmod()
678
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __snake_case = { """configuration_albert""": ["""ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AlbertConfig""", """AlbertOnnxConfig"""], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ["""AlbertTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ["""AlbertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ """ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """AlbertForMaskedLM""", """AlbertForMultipleChoice""", """AlbertForPreTraining""", """AlbertForQuestionAnswering""", """AlbertForSequenceClassification""", """AlbertForTokenClassification""", """AlbertModel""", """AlbertPreTrainedModel""", """load_tf_weights_in_albert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ """TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFAlbertForMaskedLM""", """TFAlbertForMultipleChoice""", """TFAlbertForPreTraining""", """TFAlbertForQuestionAnswering""", """TFAlbertForSequenceClassification""", """TFAlbertForTokenClassification""", """TFAlbertMainLayer""", """TFAlbertModel""", """TFAlbertPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ """FlaxAlbertForMaskedLM""", """FlaxAlbertForMultipleChoice""", """FlaxAlbertForPreTraining""", """FlaxAlbertForQuestionAnswering""", """FlaxAlbertForSequenceClassification""", """FlaxAlbertForTokenClassification""", 
"""FlaxAlbertModel""", """FlaxAlbertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert import AlbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert_fast import AlbertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_albert import ( ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, AlbertPreTrainedModel, load_tf_weights_in_albert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_albert import ( TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFAlbertForMaskedLM, TFAlbertForMultipleChoice, TFAlbertForPreTraining, TFAlbertForQuestionAnswering, TFAlbertForSequenceClassification, TFAlbertForTokenClassification, TFAlbertMainLayer, TFAlbertModel, TFAlbertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, FlaxAlbertPreTrainedModel, ) else: import sys __snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
178
import unittest

from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class Image:
        """Stub so the module imports without Pillow; vision tests are skipped anyway."""

        # BUG FIX: the stub must be named `Image` and expose `open`, otherwise
        # the `Image.open(...)` calls below raise NameError when PIL is missing.
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    # BUG FIX: every test method was named `__magic_name__`, so later
    # definitions shadowed earlier ones and only a single test actually ran.
    # Restored distinct, discoverable `test_*` names.

    @require_torch
    def test_small_model_pt(self):
        """Tiny random CLIP model, PyTorch backend."""
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation
        # and the order is not guaranteed across python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        # Five identical images -> five result lists of three (label order unspecified).
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ]
            ]
            * 5,
        )

    @require_tf
    def test_small_model_tf(self):
        """Tiny random CLIP model, TensorFlow backend."""
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ]
            ]
            * 5,
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        """Full CLIP ViT-B/32 model, PyTorch backend."""
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        """Full CLIP ViT-B/32 model, TensorFlow backend."""
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
678
0
"""Convert Swin SimMIM checkpoints from the original repository to the
HuggingFace `SwinForMaskedImageModeling` format."""
import argparse

import requests
import torch
from PIL import Image

from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor

# BUG FIX: every function here shared the same garbled name (with duplicate
# parameter names, a SyntaxError), and the __main__ guard called
# `convert_swin_checkpoint`, which was never defined.  Real names restored.


def get_swin_config(model_name):
    """Build the SwinConfig (192x192 input) matching a SimMIM checkpoint name."""
    config = SwinConfig(image_size=192)

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads

    return config


def rename_key(name):
    """Map an original SimMIM state-dict key to the HF Swin naming scheme."""
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    # "attn.proj" must be handled before the generic "attn" substitution.
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"

    if "decoder" in name:
        pass  # decoder head keys keep their original names
    else:
        name = "swin." + name

    return name


def convert_state_dict(orig_state_dict, model):
    """Rename all keys and split fused qkv matrices into separate q/k/v tensors."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            pass  # attention masks are recomputed by the HF implementation
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    """Load the original checkpoint, convert it, sanity-check a forward pass,
    and optionally save/push the converted model and image processor."""
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs).logits

    print(outputs.keys())
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="swin-base-simmim-window6-192",
        type=str,
        choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
        help="Name of the Swin SimMIM model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
323
# Minimum s-t cut of a flow network via the Edmonds-Karp max-flow algorithm.
# BUG FIX: both functions were defined with duplicate (garbled) parameter
# names -- a SyntaxError -- and the module constant was not named `test_graph`
# even though the __main__ guard referenced it.  Real names restored.

# Capacity matrix of the classic 6-node example network.
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    """Breadth-first search of the residual graph.

    Fills ``parent`` with predecessor indices along discovered edges and
    returns True when the sink ``t`` is reachable from the source ``s``.
    """
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            # Follow only edges with remaining capacity to unvisited nodes.
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    """Return the edges of a minimum source-sink cut.

    Runs Edmonds-Karp to saturate the network (``graph`` is mutated into the
    residual graph), then reports every edge whose residual capacity dropped
    to zero, i.e. the saturated edges forming the cut, as (u, v) tuples.
    """
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities (copy).
    while bfs(graph, source, sink, parent):
        # Find the bottleneck capacity along the augmenting path.
        path_flow = float("Inf")
        s = sink
        while s != source:
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow

        # Update residual capacities of forward and backward edges.
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    # Saturated edges (zero residual, positive original capacity) form the cut.
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))

    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
678
0
#!/usr/bin/env python3
"""Mask and prune GPT-2 attention heads by gradient-based importance,
following the methodology of "Are Sixteen Heads Really Better than One?"
(Michel et al., 2019, http://arxiv.org/abs/1905.10650).

BUG FIX: every function was named `SCREAMING_SNAKE_CASE` with duplicate
parameter names (a SyntaxError), and the imports referenced nonexistent
names (`GPTaLMHeadModel`, `np.intaa`).  Real identifiers restored.
"""
import argparse
import logging
import os
from datetime import datetime

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm

from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)


def save_model(model, dirpath):
    """Save `model` to `dirpath`, first removing any stale config/weight files."""
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)


def entropy(p, unlogit=False):
    """Compute the entropy of `p` along its last axis.

    When `unlogit` is True, `p` is squared first (turning attention logits
    into something probability-like before the entropy computation).
    """
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0  # define 0*log(0) = 0 instead of NaN
    return -plogp.sum(dim=-1)


def print_2d_tensor(tensor):
    """Log a 2D tensor, one row per layer, tab-separated."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))


def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute per-head attention entropy and importance scores.

    Importance is |dL/d(head_mask)| accumulated over the eval set, as in
    http://arxiv.org/abs/1905.10650.  Returns (attn_entropy, head_importance,
    total_loss).
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss


def mask_heads(args, model, eval_dataloader):
    """Iteratively mask the least-important heads until the LM score drops
    below `args.masking_threshold` of the original score.  Returns the mask.
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask


def prune_heads(args, model, eval_dataloader, head_mask):
    """Actually prune the heads zeroed in `head_mask` and compare score and
    timing before and after pruning."""
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )

    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)


def main():
    """Parse arguments, set up (distributed) devices, load data, then compute
    head importance and optionally mask/prune heads."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
33
import unittest

from transformers import (
    MODEL_FOR_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_CAUSAL_LM_MAPPING,
    TextGenerationPipeline,
    logging,
    pipeline,
)
from transformers.testing_utils import (
    CaptureLogger,
    is_pipeline_test,
    require_accelerate,
    require_tf,
    require_torch,
    require_torch_gpu,
    require_torch_or_tf,
)

from .test_pipelines_common import ANY


# Test-suite for the text-generation pipeline (PyTorch and TensorFlow backends).
# NOTE(review): identifiers in this file look machine-mangled (`_lowerCAmelCase`,
# `__magic_name__`, `lowerCAmelCase__`, `__UpperCAmelCase`). As written, every
# method shares the name `__magic_name__` (later defs shadow earlier ones) and
# results are assigned to `lowerCAmelCase__` but asserted via the undefined
# `__UpperCAmelCase` — confirm intended names against the original upstream file.
@is_pipeline_test
@require_torch_or_tf
class _lowerCAmelCase ( unittest.TestCase ):
    # Model mappings consumed by the common pipeline-test harness.
    # NOTE(review): both class attributes share the name `A__`; the second
    # assignment shadows the first as written.
    A__ = MODEL_FOR_CAUSAL_LM_MAPPING
    A__ = TF_MODEL_FOR_CAUSAL_LM_MAPPING

    @require_torch
    def __magic_name__( self ):
        """Deterministic generation checks with a tiny CTRL model on PyTorch."""
        lowerCAmelCase__ : Tuple = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
        # Using `do_sample=False` to force deterministic output
        lowerCAmelCase__ : Optional[int] = text_generator('''This is a test''' , do_sample=__UpperCAmelCase )
        self.assertEqual(
            __UpperCAmelCase ,
            [
                {
                    '''generated_text''': (
                        '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
                        ''' oscope. FiliFili@@'''
                    )
                }
            ] , )
        # Batched input: one list of results per prompt.
        lowerCAmelCase__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] )
        self.assertEqual(
            __UpperCAmelCase ,
            [
                [
                    {
                        '''generated_text''': (
                            '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
                            ''' oscope. FiliFili@@'''
                        )
                    }
                ],
                [
                    {
                        '''generated_text''': (
                            '''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
                            ''' oscope. oscope. FiliFili@@'''
                        )
                    }
                ],
            ] , )
        # return_tensors mode yields token ids instead of decoded text.
        lowerCAmelCase__ : str = text_generator('''This is a test''' , do_sample=__UpperCAmelCase , num_return_sequences=2 , return_tensors=__UpperCAmelCase )
        self.assertEqual(
            __UpperCAmelCase ,
            [
                {'''generated_token_ids''': ANY(__UpperCAmelCase )},
                {'''generated_token_ids''': ANY(__UpperCAmelCase )},
            ] , )
        # Force a pad token (copied from eos) so batching works, then check
        # batched tensor output.
        lowerCAmelCase__ : List[Any] = text_generator.model.config.eos_token_id
        lowerCAmelCase__ : List[Any] = '''<pad>'''
        lowerCAmelCase__ : List[Any] = text_generator(
            ['''This is a test''', '''This is a second test'''] ,
            do_sample=__UpperCAmelCase ,
            num_return_sequences=2 ,
            batch_size=2 ,
            return_tensors=__UpperCAmelCase , )
        self.assertEqual(
            __UpperCAmelCase ,
            [
                [
                    {'''generated_token_ids''': ANY(__UpperCAmelCase )},
                    {'''generated_token_ids''': ANY(__UpperCAmelCase )},
                ],
                [
                    {'''generated_token_ids''': ANY(__UpperCAmelCase )},
                    {'''generated_token_ids''': ANY(__UpperCAmelCase )},
                ],
            ] , )

    @require_tf
    def __magic_name__( self ):
        """Same deterministic checks with the TensorFlow backend."""
        lowerCAmelCase__ : int = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
        # Using `do_sample=False` to force deterministic output
        lowerCAmelCase__ : List[Any] = text_generator('''This is a test''' , do_sample=__UpperCAmelCase )
        self.assertEqual(
            __UpperCAmelCase ,
            [
                {
                    '''generated_text''': (
                        '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
                        ''' please,'''
                    )
                }
            ] , )
        lowerCAmelCase__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=__UpperCAmelCase )
        self.assertEqual(
            __UpperCAmelCase ,
            [
                [
                    {
                        '''generated_text''': (
                            '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
                            ''' please,'''
                        )
                    }
                ],
                [
                    {
                        '''generated_text''': (
                            '''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
                            ''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
                        )
                    }
                ],
            ] , )

    def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
        """Build a pipeline for the common harness; returns (pipeline, example prompts)."""
        lowerCAmelCase__ : Dict = TextGenerationPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
        return text_generator, ["This is a test", "Another test"]

    def __magic_name__( self ):
        """`stop_sequence` should truncate generation at the given token."""
        lowerCAmelCase__ : Any = '''Hello I believe in'''
        lowerCAmelCase__ : List[Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
        lowerCAmelCase__ : Optional[int] = text_generator(__UpperCAmelCase )
        self.assertEqual(
            __UpperCAmelCase ,
            [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
        lowerCAmelCase__ : List[str] = text_generator(__UpperCAmelCase , stop_sequence=''' fe''' )
        self.assertEqual(__UpperCAmelCase , [{'''generated_text''': '''Hello I believe in fe'''}] )

    def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase ):
        """Shared behavioral checks run against every model in the mapping."""
        lowerCAmelCase__ : str = text_generator.model
        lowerCAmelCase__ : Optional[int] = text_generator.tokenizer
        lowerCAmelCase__ : Tuple = text_generator('''This is a test''' )
        self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
        self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
        # return_full_text=False strips the prompt from the returned text.
        lowerCAmelCase__ : Optional[int] = text_generator('''This is a test''' , return_full_text=__UpperCAmelCase )
        self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
        self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
        # Same option passed at pipeline-construction time instead of call time.
        lowerCAmelCase__ : Dict = pipeline(task='''text-generation''' , model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , return_full_text=__UpperCAmelCase )
        lowerCAmelCase__ : Dict = text_generator('''This is a test''' )
        self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
        self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
        lowerCAmelCase__ : List[str] = text_generator('''This is a test''' , return_full_text=__UpperCAmelCase )
        self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
        self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
        lowerCAmelCase__ : Optional[int] = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=__UpperCAmelCase )
        self.assertEqual(
            __UpperCAmelCase ,
            [
                [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
                [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
            ] , )
        if text_generator.tokenizer.pad_token is not None:
            # Batched generation requires a pad token.
            lowerCAmelCase__ : List[str] = text_generator(
                ['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=__UpperCAmelCase )
            self.assertEqual(
                __UpperCAmelCase ,
                [
                    [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
                    [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}],
                ] , )
        # Mutually incompatible output options must raise.
        with self.assertRaises(__UpperCAmelCase ):
            lowerCAmelCase__ : Any = text_generator('''test''' , return_full_text=__UpperCAmelCase , return_text=__UpperCAmelCase )
        with self.assertRaises(__UpperCAmelCase ):
            lowerCAmelCase__ : Optional[int] = text_generator('''test''' , return_full_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
        with self.assertRaises(__UpperCAmelCase ):
            lowerCAmelCase__ : str = text_generator('''test''' , return_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
        # Empty prompt is slighly special
        # it requires BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            lowerCAmelCase__ : str = text_generator('''''' )
            self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] )
        else:
            with self.assertRaises((ValueError, AssertionError) ):
                lowerCAmelCase__ : List[str] = text_generator('''''' )
        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return
        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        lowerCAmelCase__ : Optional[Any] = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
        # NOTE(review): the list above is assigned to a mangled name while the
        # condition below reads `EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS` — presumably
        # the same variable before obfuscation; confirm against upstream.
        if (
            tokenizer.model_max_length < 1_0000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
                text_generator('''This is a test''' * 500 , max_new_tokens=20 )
            lowerCAmelCase__ : Optional[Any] = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 )
            # Hole strategy cannot work
            with self.assertRaises(__UpperCAmelCase ):
                text_generator(
                    '''This is a test''' * 500 ,
                    handle_long_generation='''hole''' ,
                    max_new_tokens=tokenizer.model_max_length + 10 , )

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def __magic_name__( self ):
        """Accelerate integration: device_map / torch_dtype via model_kwargs and directly."""
        import torch

        # Classic `model_kwargs`
        lowerCAmelCase__ : List[str] = pipeline(
            model='''hf-internal-testing/tiny-random-bloom''' ,
            model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
        lowerCAmelCase__ : Any = pipe('''This is a test''' )
        self.assertEqual(
            __UpperCAmelCase ,
            [
                {
                    '''generated_text''': (
                        '''This is a test test test test test test test test test test test test test test test test'''
                        ''' test'''
                    )
                }
            ] , )
        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        lowerCAmelCase__ : Dict = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
        lowerCAmelCase__ : Union[str, Any] = pipe('''This is a test''' )
        self.assertEqual(
            __UpperCAmelCase ,
            [
                {
                    '''generated_text''': (
                        '''This is a test test test test test test test test test test test test test test test test'''
                        ''' test'''
                    )
                }
            ] , )
        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        lowerCAmelCase__ : str = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
        lowerCAmelCase__ : Any = pipe('''This is a test''' )
        self.assertEqual(
            __UpperCAmelCase ,
            [
                {
                    '''generated_text''': (
                        '''This is a test test test test test test test test test test test test test test test test'''
                        ''' test'''
                    )
                }
            ] , )

    @require_torch
    @require_torch_gpu
    def __magic_name__( self ):
        """Half-precision model on an explicit CUDA device runs without error."""
        import torch

        lowerCAmelCase__ : List[str] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
        pipe('''This is a test''' )

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def __magic_name__( self ):
        """Half-precision + device_map='auto' with sampling kwargs runs without error."""
        import torch

        lowerCAmelCase__ : Any = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
        pipe('''This is a test''' , do_sample=__UpperCAmelCase , top_p=0.5 )

    def __magic_name__( self ):
        """A warning is logged only when both `max_length` and `max_new_tokens` are set."""
        lowerCAmelCase__ : int = '''Hello world'''
        lowerCAmelCase__ : Union[str, Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
        if text_generator.model.framework == "tf":
            lowerCAmelCase__ : List[Any] = logging.get_logger('''transformers.generation.tf_utils''' )
        else:
            lowerCAmelCase__ : Dict = logging.get_logger('''transformers.generation.utils''' )
        lowerCAmelCase__ : Optional[Any] = '''Both `max_new_tokens`'''  # The beggining of the message to be checked in this test
        # Both are set by the user -> log warning
        with CaptureLogger(__UpperCAmelCase ) as cl:
            lowerCAmelCase__ : List[str] = text_generator(__UpperCAmelCase , max_length=10 , max_new_tokens=1 )
        self.assertIn(__UpperCAmelCase , cl.out )
        # The user only sets one -> no warning
        with CaptureLogger(__UpperCAmelCase ) as cl:
            lowerCAmelCase__ : Any = text_generator(__UpperCAmelCase , max_new_tokens=1 )
        self.assertNotIn(__UpperCAmelCase , cl.out )
        with CaptureLogger(__UpperCAmelCase ) as cl:
            lowerCAmelCase__ : Union[str, Any] = text_generator(__UpperCAmelCase , max_length=10 )
        self.assertNotIn(__UpperCAmelCase , cl.out )
678
0
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number, with fibonacci(2) == 1, fibonacci(12) == 144.

    Returns 0 for n == 1 or non-int input (legacy guard kept for compatibility).
    """
    # NOTE: the original obfuscated source had `isinstance(a__, a__)` and three
    # functions all named `_UpperCamelCase` referencing undefined names; this
    # restores the intended, runnable implementation without changing intent.
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci term containing `n` digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        # Recompute the term at the current index; the original passed the
        # target digit count here, which would never terminate.
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    """Project Euler 25: index of the first Fibonacci term with `n` digits.

    >>> solution(3)
    12
    """
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
619
def base16_encode(data: bytes) -> str:
    """Encode raw bytes as an uppercase Base16 (hex) string per RFC 3548.

    >>> base16_encode(b'Hello World!')
    '48656C6C6F20576F726C6421'
    """
    # NOTE: the original obfuscated source named both functions
    # `__lowerCAmelCase` (so the encoder was shadowed and unreachable) and
    # called `hex()` on the whole buffer instead of each byte; restored here.
    # hex() yields '0x..': strip the prefix, left-pad to two digits, uppercase.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase Base16 string back to bytes.

    Raises ValueError on odd length or characters outside 0-9A-F.
    """
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid: Data does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
678
0
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available

if is_vision_available():
    from PIL import Image

    from transformers import AlignProcessor, EfficientNetImageProcessor


# Tests for AlignProcessor (BERT tokenizer + EfficientNet image processor).
# NOTE(review): identifiers look machine-mangled (`_lowerCamelCase`,
# `UpperCamelCase_`, `SCREAMING_SNAKE_CASE__`, `__UpperCAmelCase`); as written,
# every method shares one name, locals are assigned to a single mangled name
# and then read under their original names (e.g. `processor`, `tokenizer`).
# Comments below describe the evident intent; confirm against upstream.
@require_vision
class _lowerCamelCase ( unittest.TestCase ):
    def UpperCamelCase_ ( self ) -> Dict:
        """setUp: write a tiny BERT vocab and an image-processor JSON to a temp dir."""
        SCREAMING_SNAKE_CASE__: Dict= tempfile.mkdtemp()
        SCREAMING_SNAKE_CASE__: str= [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        SCREAMING_SNAKE_CASE__: str= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        SCREAMING_SNAKE_CASE__: int= {
            '''do_resize''': True,
            '''size''': 20,
            '''do_center_crop''': True,
            '''crop_size''': 18,
            '''do_normalize''': True,
            '''image_mean''': [0.48145466, 0.4578275, 0.40821073],
            '''image_std''': [0.26862954, 0.26130258, 0.27577711],
        }
        SCREAMING_SNAKE_CASE__: List[Any]= os.path.join(self.tmpdirname , __UpperCAmelCase )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(__UpperCAmelCase , __UpperCAmelCase )

    def UpperCamelCase_ ( self , **lowerCAmelCase ) -> int:
        """Slow BERT tokenizer loaded from the temp dir."""
        return BertTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )

    def UpperCamelCase_ ( self , **lowerCAmelCase ) -> Any:
        """Fast (Rust) BERT tokenizer loaded from the temp dir."""
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **__UpperCAmelCase )

    def UpperCamelCase_ ( self , **lowerCAmelCase ) -> str:
        """EfficientNet image processor loaded from the temp dir."""
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase )

    def UpperCamelCase_ ( self ) -> str:
        """tearDown: remove the temp dir."""
        shutil.rmtree(self.tmpdirname )

    def UpperCamelCase_ ( self ) -> Dict:
        """Create one random uint8 CHW image and convert it to a PIL image."""
        SCREAMING_SNAKE_CASE__: Optional[Any]= [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        SCREAMING_SNAKE_CASE__: Dict= [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def UpperCamelCase_ ( self ) -> List[Any]:
        """save_pretrained/from_pretrained round-trip with slow and fast tokenizers."""
        SCREAMING_SNAKE_CASE__: Optional[int]= self.get_tokenizer()
        SCREAMING_SNAKE_CASE__: Any= self.get_rust_tokenizer()
        SCREAMING_SNAKE_CASE__: Union[str, Any]= self.get_image_processor()
        SCREAMING_SNAKE_CASE__: str= AlignProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
        processor_slow.save_pretrained(self.tmpdirname )
        SCREAMING_SNAKE_CASE__: Any= AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__UpperCAmelCase )
        SCREAMING_SNAKE_CASE__: int= AlignProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
        processor_fast.save_pretrained(self.tmpdirname )
        SCREAMING_SNAKE_CASE__: Dict= AlignProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , __UpperCAmelCase )
        self.assertIsInstance(processor_fast.tokenizer , __UpperCAmelCase )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , __UpperCAmelCase )
        self.assertIsInstance(processor_fast.image_processor , __UpperCAmelCase )

    def UpperCamelCase_ ( self ) -> Union[str, Any]:
        """Round-trip with overridden special tokens / image-processor kwargs."""
        SCREAMING_SNAKE_CASE__: int= AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        SCREAMING_SNAKE_CASE__: Optional[int]= self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        SCREAMING_SNAKE_CASE__: List[Any]= self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 )
        SCREAMING_SNAKE_CASE__: Optional[int]= AlignProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__UpperCAmelCase , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __UpperCAmelCase )

    def UpperCamelCase_ ( self ) -> Tuple:
        """Processor image output matches the bare image processor (sum within 1e-2)."""
        SCREAMING_SNAKE_CASE__: List[Any]= self.get_image_processor()
        SCREAMING_SNAKE_CASE__: int= self.get_tokenizer()
        SCREAMING_SNAKE_CASE__: List[Any]= AlignProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
        SCREAMING_SNAKE_CASE__: Optional[int]= self.prepare_image_inputs()
        SCREAMING_SNAKE_CASE__: str= image_processor(__UpperCAmelCase , return_tensors='''np''' )
        SCREAMING_SNAKE_CASE__: List[str]= processor(images=__UpperCAmelCase , return_tensors='''np''' )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def UpperCamelCase_ ( self ) -> Dict:
        """Processor text output matches the bare tokenizer (max_length padding)."""
        SCREAMING_SNAKE_CASE__: Dict= self.get_image_processor()
        SCREAMING_SNAKE_CASE__: Dict= self.get_tokenizer()
        SCREAMING_SNAKE_CASE__: str= AlignProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
        SCREAMING_SNAKE_CASE__: int= '''lower newer'''
        SCREAMING_SNAKE_CASE__: int= processor(text=__UpperCAmelCase )
        SCREAMING_SNAKE_CASE__: Dict= tokenizer(__UpperCAmelCase , padding='''max_length''' , max_length=64 )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def UpperCamelCase_ ( self ) -> str:
        """Text+image call returns all expected keys; calling with nothing raises."""
        SCREAMING_SNAKE_CASE__: str= self.get_image_processor()
        SCREAMING_SNAKE_CASE__: Any= self.get_tokenizer()
        SCREAMING_SNAKE_CASE__: Union[str, Any]= AlignProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
        SCREAMING_SNAKE_CASE__: Optional[int]= '''lower newer'''
        SCREAMING_SNAKE_CASE__: Optional[int]= self.prepare_image_inputs()
        SCREAMING_SNAKE_CASE__: List[Any]= processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with pytest.raises(__UpperCAmelCase ):
            processor()

    def UpperCamelCase_ ( self ) -> List[str]:
        """batch_decode is forwarded to the underlying tokenizer."""
        SCREAMING_SNAKE_CASE__: Optional[Any]= self.get_image_processor()
        SCREAMING_SNAKE_CASE__: int= self.get_tokenizer()
        SCREAMING_SNAKE_CASE__: Any= AlignProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
        SCREAMING_SNAKE_CASE__: Dict= [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        SCREAMING_SNAKE_CASE__: List[Any]= processor.batch_decode(__UpperCAmelCase )
        SCREAMING_SNAKE_CASE__: Optional[int]= tokenizer.batch_decode(__UpperCAmelCase )
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )

    def UpperCamelCase_ ( self ) -> Optional[int]:
        """Processor output keys match processor.model_input_names."""
        SCREAMING_SNAKE_CASE__: Optional[int]= self.get_image_processor()
        SCREAMING_SNAKE_CASE__: Tuple= self.get_tokenizer()
        SCREAMING_SNAKE_CASE__: List[Any]= AlignProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
        SCREAMING_SNAKE_CASE__: Dict= '''lower newer'''
        SCREAMING_SNAKE_CASE__: Dict= self.prepare_image_inputs()
        SCREAMING_SNAKE_CASE__: Optional[int]= processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
64
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


# Tests for DPMSolverSDEScheduler; golden sums/means differ per device (mps/cuda/cpu).
# NOTE(review): identifiers look machine-mangled (`_lowerCAmelCase`, `_lowercase`
# base class, `__magic_name__`, `lowerCAmelCase__`, `__UpperCAmelCase`,
# `torch.floataa`/`bfloataa`); every method shares one name and locals are read
# under their original (now undefined) names. Comments describe evident intent.
@require_torchsde
class _lowerCAmelCase ( _lowercase ):
    # Scheduler classes exercised by the common harness, and the step count.
    # NOTE(review): both attributes share the name `A__`; the second shadows the first.
    A__ = (DPMSolverSDEScheduler,)
    A__ = 10

    def __magic_name__( self , **__UpperCAmelCase ):
        """Default scheduler config; keyword overrides are merged in."""
        lowerCAmelCase__ : Dict = {
            '''num_train_timesteps''': 1100,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''noise_sampler_seed''': 0,
        }
        config.update(**__UpperCAmelCase )
        return config

    def __magic_name__( self ):
        """Config sweep over num_train_timesteps."""
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=__UpperCAmelCase )

    def __magic_name__( self ):
        """Config sweep over paired beta_start/beta_end values."""
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=__UpperCAmelCase , beta_end=__UpperCAmelCase )

    def __magic_name__( self ):
        """Config sweep over beta schedules."""
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=__UpperCAmelCase )

    def __magic_name__( self ):
        """Config sweep over prediction types."""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=__UpperCAmelCase )

    def __magic_name__( self ):
        """Full denoising loop (epsilon prediction); checks per-device golden values."""
        lowerCAmelCase__ : List[Any] = self.scheduler_classes[0]
        lowerCAmelCase__ : str = self.get_scheduler_config()
        lowerCAmelCase__ : Optional[Any] = scheduler_class(**__UpperCAmelCase )
        scheduler.set_timesteps(self.num_inference_steps )
        lowerCAmelCase__ : Union[str, Any] = self.dummy_model()
        lowerCAmelCase__ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
        lowerCAmelCase__ : Optional[Any] = sample.to(__UpperCAmelCase )
        for i, t in enumerate(scheduler.timesteps ):
            lowerCAmelCase__ : Optional[Any] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )
            lowerCAmelCase__ : int = model(__UpperCAmelCase , __UpperCAmelCase )
            lowerCAmelCase__ : List[str] = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
            lowerCAmelCase__ : List[str] = output.prev_sample
        lowerCAmelCase__ : Union[str, Any] = torch.sum(torch.abs(__UpperCAmelCase ) )
        lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875 ) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277 ) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406 ) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652 ) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3

    def __magic_name__( self ):
        """Same loop with prediction_type='v_prediction'."""
        lowerCAmelCase__ : Dict = self.scheduler_classes[0]
        lowerCAmelCase__ : Tuple = self.get_scheduler_config(prediction_type='''v_prediction''' )
        lowerCAmelCase__ : Tuple = scheduler_class(**__UpperCAmelCase )
        scheduler.set_timesteps(self.num_inference_steps )
        lowerCAmelCase__ : Optional[Any] = self.dummy_model()
        lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
        lowerCAmelCase__ : Tuple = sample.to(__UpperCAmelCase )
        for i, t in enumerate(scheduler.timesteps ):
            lowerCAmelCase__ : List[str] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )
            lowerCAmelCase__ : List[Any] = model(__UpperCAmelCase , __UpperCAmelCase )
            lowerCAmelCase__ : int = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
            lowerCAmelCase__ : int = output.prev_sample
        lowerCAmelCase__ : Any = torch.sum(torch.abs(__UpperCAmelCase ) )
        lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453 ) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284 ) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703 ) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297 ) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125 ) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621 ) < 1e-3

    def __magic_name__( self ):
        """Loop with timesteps placed on the target device via set_timesteps(device=...)."""
        lowerCAmelCase__ : Optional[int] = self.scheduler_classes[0]
        lowerCAmelCase__ : Tuple = self.get_scheduler_config()
        lowerCAmelCase__ : Tuple = scheduler_class(**__UpperCAmelCase )
        scheduler.set_timesteps(self.num_inference_steps , device=__UpperCAmelCase )
        lowerCAmelCase__ : Dict = self.dummy_model()
        lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter.to(__UpperCAmelCase ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            lowerCAmelCase__ : List[str] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )
            lowerCAmelCase__ : Optional[int] = model(__UpperCAmelCase , __UpperCAmelCase )
            lowerCAmelCase__ : Tuple = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
            lowerCAmelCase__ : Optional[int] = output.prev_sample
        lowerCAmelCase__ : Dict = torch.sum(torch.abs(__UpperCAmelCase ) )
        lowerCAmelCase__ : Dict = torch.mean(torch.abs(__UpperCAmelCase ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938 ) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635 ) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312 ) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771 ) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3

    def __magic_name__( self ):
        """Loop with use_karras_sigmas enabled; looser (1e-2) mean tolerance."""
        lowerCAmelCase__ : Optional[Any] = self.scheduler_classes[0]
        lowerCAmelCase__ : Dict = self.get_scheduler_config()
        lowerCAmelCase__ : Optional[int] = scheduler_class(**__UpperCAmelCase , use_karras_sigmas=__UpperCAmelCase )
        scheduler.set_timesteps(self.num_inference_steps , device=__UpperCAmelCase )
        lowerCAmelCase__ : List[Any] = self.dummy_model()
        lowerCAmelCase__ : int = self.dummy_sample_deter.to(__UpperCAmelCase ) * scheduler.init_noise_sigma
        lowerCAmelCase__ : Union[str, Any] = sample.to(__UpperCAmelCase )
        for t in scheduler.timesteps:
            lowerCAmelCase__ : Optional[Any] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )
            lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , __UpperCAmelCase )
            lowerCAmelCase__ : Tuple = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
            lowerCAmelCase__ : Dict = output.prev_sample
        lowerCAmelCase__ : int = torch.sum(torch.abs(__UpperCAmelCase ) )
        lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188 ) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125 ) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672 ) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
678
0
"""simple docstring""" import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets A : str = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n" A : Optional[int] = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n" A : List[Any] = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. 
Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. 
Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class _UpperCamelCase ( datasets.Metric ): '''simple docstring''' def snake_case ( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[ "https://arxiv.org/abs/2102.01454", "https://github.com/krishnap25/mauve", ] , ) def snake_case ( self , __a , __a , __a=None , __a=None , __a=None , __a=None , __a="auto" , __a=-1 , __a=0.9 , __a=5 , __a=5_00 , __a="gpt2-large" , __a=-1 , __a=10_24 , __a=25 , __a=5 , __a=True , __a=25 , ): __lowerCAmelCase = compute_mauve( p_text=__UpperCAmelCase , q_text=__UpperCAmelCase , p_features=__UpperCAmelCase , q_features=__UpperCAmelCase , p_tokens=__UpperCAmelCase , q_tokens=__UpperCAmelCase , num_buckets=__UpperCAmelCase , pca_max_data=__UpperCAmelCase , kmeans_explained_var=__UpperCAmelCase , kmeans_num_redo=__UpperCAmelCase , kmeans_max_iter=__UpperCAmelCase , featurize_model_name=__UpperCAmelCase 
, device_id=__UpperCAmelCase , max_text_length=__UpperCAmelCase , divergence_curve_discretization_size=__UpperCAmelCase , mauve_scaling_factor=__UpperCAmelCase , verbose=__UpperCAmelCase , seed=__UpperCAmelCase , ) return out
636
import time
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers.generation import (
        MaxLengthCriteria,
        MaxNewTokensCriteria,
        MaxTimeCriteria,
        StoppingCriteriaList,
        validate_stopping_criteria,
    )


@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    """Unit tests for the generation stopping criteria.

    The anonymized original named every method ``__magic_name__`` (so later
    definitions shadowed earlier ones and only one test survived) and used
    annotated tuple unpacking (``a , a : T = ...``), which is a SyntaxError.
    Both defects are repaired; the test logic itself is unchanged.
    """

    def _get_tensors(self, length):
        # Build dummy (batch, length) input ids and a uniform score
        # distribution of the requested sequence length.
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length

        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        # Back-dating the initial timestamp makes the time budget already spent.
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        # NOTE(review): the warning category was anonymized in the original;
        # UserWarning matches the upstream transformers test — confirm.
        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
678
0
"""Power iteration for the dominant eigenpair of a symmetric / Hermitian matrix."""
import numpy as np


def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
):
    """Find the eigenvalue of largest modulus of ``input_matrix`` and its eigenvector.

    Args:
        input_matrix: square matrix; symmetric if real, Hermitian if complex.
        vector: starting vector with a nonzero component along the dominant eigenvector.
        error_tol: relative change in the eigenvalue estimate at which to stop.
        max_iterations: hard cap on the number of iterations.

    Returns:
        (largest_eigenvalue, eigenvector) — the eigenvalue is returned as a real
        number (the Rayleigh quotient of a Hermitian matrix is real).
    """
    # Ensure matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real.
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian.
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed
    # max_iterations or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find Rayleigh quotient
        # (faster than usual b/c we know vector is normalized already).
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence via the relative change of the eigenvalue estimate.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        # The Rayleigh quotient of a Hermitian matrix is real up to round-off.
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration():
    """Compare power_iteration against numpy's eigh on real and complex inputs."""
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T  # makes the matrix Hermitian
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation: eigh is for symmetric / Hermitian matrices and
        # returns eigenvalues in ascending order, so the last one is the maximum.
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        eigen_value_max = eigen_values[-1]
        # Last column is the eigenvector of the largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy give close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Eigenvectors are only unique up to sign, so compare absolute values.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
49
from functools import reduce lowerCAmelCase_ = ( """73167176531330624919225119674426574742355349194934""" """96983520312774506326239578318016984801869478851843""" """85861560789112949495459501737958331952853208805511""" """12540698747158523863050715693290963295227443043557""" """66896648950445244523161731856403098711121722383113""" """62229893423380308135336276614282806444486645238749""" """30358907296290491560440772390713810515859307960866""" """70172427121883998797908792274921901699720888093776""" """65727333001053367881220235421809751254540594752243""" """52584907711670556013604839586446706324415722155397""" """53697817977846174064955149290862569321978468622482""" """83972241375657056057490261407972968652414535100474""" """82166370484403199890008895243450658541227588666881""" """16427171479924442928230863465674813919123162824586""" """17866458359124566529476545682848912883142607690042""" """24219022671055626321111109370544217506941658960408""" """07198403850962455444362981230987879927244284909188""" """84580156166097919133875499200524063689912560717606""" """05886116467109405077541002256983155200055935729725""" """71636269561882670428252483600823257530420752963450""" ) def __lowerCAmelCase ( UpperCamelCase = N ) -> int: return max( # mypy cannot properly interpret reduce int(reduce(lambda UpperCamelCase , UpperCamelCase : str(int(UpperCamelCase ) * int(UpperCamelCase ) ) , n[i : i + 13] ) ) for i in range(len(UpperCamelCase ) - 12 ) ) if __name__ == "__main__": print(F"""{solution() = }""")
678
0
'''simple docstring''' import os from argparse import ArgumentParser from typing import List import torch.utils.data from datasets import Dataset, IterableDataset from datasets.distributed import split_dataset_by_node _a : Dict = 4 _a : Union[str, Any] = 3 class lowercase_ ( _lowercase ): '''simple docstring''' pass def lowerCamelCase__ ( SCREAMING_SNAKE_CASE : List[str] ): for shard in shards: for i in range(SCREAMING_SNAKE_CASE ): yield {"i": i, "shard": shard} def lowerCamelCase__ ( ): UpperCAmelCase = int(os.environ['RANK'] ) UpperCAmelCase = int(os.environ['WORLD_SIZE'] ) UpperCAmelCase = ArgumentParser() parser.add_argument('--streaming' , type=SCREAMING_SNAKE_CASE ) parser.add_argument('--local_rank' , type=SCREAMING_SNAKE_CASE ) parser.add_argument('--num_workers' , type=SCREAMING_SNAKE_CASE , default=0 ) UpperCAmelCase = parser.parse_args() UpperCAmelCase = args.streaming UpperCAmelCase = args.num_workers UpperCAmelCase = {'''shards''': [f'''shard_{shard_idx}''' for shard_idx in range(SCREAMING_SNAKE_CASE )]} UpperCAmelCase = IterableDataset.from_generator(SCREAMING_SNAKE_CASE , gen_kwargs=SCREAMING_SNAKE_CASE ) if not streaming: UpperCAmelCase = Dataset.from_list(list(SCREAMING_SNAKE_CASE ) ) UpperCAmelCase = split_dataset_by_node(SCREAMING_SNAKE_CASE , rank=SCREAMING_SNAKE_CASE , world_size=SCREAMING_SNAKE_CASE ) UpperCAmelCase = torch.utils.data.DataLoader(SCREAMING_SNAKE_CASE , num_workers=SCREAMING_SNAKE_CASE ) UpperCAmelCase = NUM_SHARDS * NUM_ITEMS_PER_SHARD UpperCAmelCase = full_size // world_size expected_local_size += int(rank < (full_size % world_size) ) UpperCAmelCase = sum(1 for _ in dataloader ) if local_size != expected_local_size: raise FailedTestError(f'''local_size {local_size} != expected_local_size {expected_local_size}''' ) if __name__ == "__main__": main()
447
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    """Rewrite every ``name.<digit>`` segment of ``key`` as ``name_<digit>``.

    PyTorch indexes submodules as ``layers.0``; the Flax naming convention
    uses ``layers_0``.
    """
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename a PyTorch weight key to its Flax equivalent and transpose the
    tensor where the two frameworks use different layouts.

    Returns the (possibly renamed) tuple key and the (possibly transposed) tensor.
    """
    # layer norm: some checkpoints store the scale under "bias"/"weight"/"gamma".
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: PT (out, in, kh, kw) -> Flax (kh, kw, in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: PT (out, in) -> Flax (in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    """Convert a PyTorch ``state_dict`` into a nested Flax parameter dict.

    ``init_key`` seeds the random Flax initialization used only as a shape
    reference for validation.
    """
    # Step 1: Convert pytorch tensors to numpy.
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params as a
    # reference for expected keys and shapes.
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameter names to match Flax names.
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters (and transpose where needed).
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
678
0
'''simple docstring''' import warnings from .generation import TFGenerationMixin class __A ( _lowercase ): # warning at import time warnings.warn( """Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will """ """be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.""" , _lowercase , )
78
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCAmelCase_ = { """configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ """NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""", """NezhaForNextSentencePrediction""", """NezhaForMaskedLM""", """NezhaForPreTraining""", """NezhaForMultipleChoice""", """NezhaForQuestionAnswering""", """NezhaForSequenceClassification""", """NezhaForTokenClassification""", """NezhaModel""", """NezhaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nezha import ( NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, NezhaPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
678
0
from typing import Callable, List, Optional, Tuple, Union

import torch
from transformers import CLIPTextModel, CLIPTokenizer

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class holding (optionally learned) text embeddings used for
    classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)


class VQDiffusionPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation using VQ Diffusion.

    The anonymized original assigned every intermediate value to the placeholder
    ``snake_case_`` while later lines read the intended names (NameError at
    runtime); coherent names are restored throughout.
    """

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )

    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        """Encode the prompt (and, for classifier-free guidance, an unconditional
        counterpart) into CLIP text embeddings."""
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Generate images for `prompt`; see the diffusers VQDiffusionPipeline
        docs for parameter semantics."""
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            # the last embedding index is the "masked" class
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(
                latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t
            ).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)

    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """Zero out (in log space) the lowest probabilities of each column so the
        kept cumulative probability stays below `truncation_rate`; at least the
        single largest probability is always kept."""
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        # undo the sort: map the mask back to the original ordering
        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
283
import tempfile
import unittest

import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax


if is_flax_available():
    import os

    from flax.core.frozen_dict import unfreeze
    from flax.traverse_util import flatten_dict

    from transformers import FlaxBertModel

    # NOTE(review): the anonymized original bound "0.12" to a throwaway name;
    # upstream sets the XLA memory fraction env var — confirm.
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8


@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    """Staging tests for pushing Flax models to the Hub and reloading them."""

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        # Best effort cleanup of the staging repos created by the tests.
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")


def check_models_equal(model1, model2):
    """Return True when the two Flax models have element-wise equal params
    (within 1e-4 absolute sum per leaf).

    The anonymized original gave this function two identical parameter names
    (a SyntaxError) and a placeholder name, while the tests below call it as
    `check_models_equal`; both are restored here.
    """
    models_are_equal = True
    flat_params_1 = flatten_dict(model1.params)
    flat_params_2 = flatten_dict(model2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal


@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    """Loading-from-subfolder tests for Flax models."""

    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            # Loading from the root must fail: the weights live in the subfolder.
            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
678
0
"""Processor that wraps an EnCodec feature extractor and a T5 tokenizer."""
from typing import List, Optional

import numpy as np

from ...processing_utils import ProcessorMixin
from ...utils import to_numpy


class MusicgenProcessor(ProcessorMixin):
    """Single entry point for audio (feature extractor) and text (tokenizer)
    preprocessing, plus decoding of generated audio.

    The anonymized original assigned the `__init__` state to local placeholders
    while `__call__` reads `self.current_processor` / `self._in_target_context_manager`;
    those attribute assignments are restored here.
    """

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        """Forward `audio` to the feature extractor and `text` to the tokenizer;
        merge the outputs when both are given."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)

        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Decode either generated audio values (first positional arg or
        `audio=`) or token ids via the tokenizer."""
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask: Optional = None) -> List[np.ndarray]:
        """Strip padded positions from each generated audio array using the
        padding mask; returns a list of per-example arrays."""
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
607
from random import randint from tempfile import TemporaryFile import numpy as np def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Any: lowerCAmelCase__ : Optional[Any] = 0 if start < end: lowerCAmelCase__ : Union[str, Any] = randint(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : List[str] = a[end] lowerCAmelCase__ : List[Any] = a[pivot] lowerCAmelCase__ : str = temp lowerCAmelCase__ , lowerCAmelCase__ : Tuple = _in_place_partition(UpperCamelCase , UpperCamelCase , UpperCamelCase ) count += _in_place_quick_sort(UpperCamelCase , UpperCamelCase , p - 1 ) count += _in_place_quick_sort(UpperCamelCase , p + 1 , UpperCamelCase ) return count def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[Any]: lowerCAmelCase__ : List[str] = 0 lowerCAmelCase__ : List[Any] = randint(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : str = a[end] lowerCAmelCase__ : Optional[int] = a[pivot] lowerCAmelCase__ : List[str] = temp lowerCAmelCase__ : str = start - 1 for index in range(UpperCamelCase , UpperCamelCase ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value lowerCAmelCase__ : List[str] = new_pivot_index + 1 lowerCAmelCase__ : int = a[new_pivot_index] lowerCAmelCase__ : int = a[index] lowerCAmelCase__ : Tuple = temp lowerCAmelCase__ : Optional[Any] = a[new_pivot_index + 1] lowerCAmelCase__ : List[str] = a[end] lowerCAmelCase__ : Union[str, Any] = temp return new_pivot_index + 1, count lowerCAmelCase_ = TemporaryFile() lowerCAmelCase_ = 1_00 # 1000 elements are to be sorted lowerCAmelCase_ , lowerCAmelCase_ = 0, 1 # mean and standard deviation lowerCAmelCase_ = np.random.normal(mu, sigma, p) np.save(outfile, X) print("""The array is""") print(X) outfile.seek(0) # using the same array lowerCAmelCase_ = np.load(outfile) lowerCAmelCase_ = len(M) - 1 lowerCAmelCase_ = _in_place_quick_sort(M, 0, r) print( """No of Comparisons for 100 elements selected from a standard normal distribution""" 
"""is :""" ) print(z)
678
0
"""simple docstring""" def __lowerCAmelCase ( lowercase : str , lowercase : Union[str, Any] ) -> None: """simple docstring""" snake_case : int = len(lowercase ) print("The following activities are selected:" ) # The first activity is always selected snake_case : List[str] = 0 print(lowercase , end="," ) # Consider rest of the activities for j in range(lowercase ): # If this activity has start time greater than # or equal to the finish time of previously # selected activity, then select it if start[j] >= finish[i]: print(lowercase , end="," ) snake_case : List[str] = j if __name__ == "__main__": import doctest doctest.testmod() __snake_case = [1, 3, 0, 5, 8, 5] __snake_case = [2, 4, 6, 7, 9, 9] print_max_activities(start, finish)
178
import pyarrow.parquet as pq
import pytest

from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_parquet_dataset(dataset, expected_features):
    """Shared sanity checks for a Dataset read from the fixture parquet file."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    # A single path and a list of paths must both be accepted.
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared sanity checks for a DatasetDict read from the fixture parquet file."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    # Image features must survive a write/read round trip.
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
678
0
"""Protein data type and (ProteinNet / PDB) serialization helpers."""
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple

import numpy as np

from . import residue_constants

FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01


@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    # Cartesian coordinates of atoms in angstroms; atom types correspond to
    # residue_constants.atom_types.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None


def from_proteinnet_string(proteinnet_str: str) -> Protein:
    """Parse a ProteinNet-format record into a `Protein` (N/CA/C atoms only)."""
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom]] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )


def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    """Build REMARK/PARENT header lines for one chain of `prot`."""
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        # Keep only the parents belonging to the requested chain.
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers


def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    """Add pdb headers to an existing PDB string. Useful during multi-chain
    recycling.
    """
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            # Group parents by their chain index.
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            # A chain just ended; emit the next chain's PARENT line.
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]
            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)


def to_pdb(prot: Protein) -> str:
    """Converts a `Protein` instance to a PDB string.

    Args:
        prot: The protein to convert to PDB.

    Returns:
        PDB string.
    """
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)


def ideal_atom_mask(prot: Protein) -> np.ndarray:
    """Standard atom mask for the protein's residue types."""
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]


def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    """Assemble a `Protein` from model features and prediction output."""
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
323
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class _lowerCAmelCase(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for a FocalNet model.

    Stores the architecture hyper-parameters (patching, per-stage depths,
    focal levels/windows, regularization) and the backbone output selection.
    """

    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        # Stage names drive the backbone output selection below.
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
678
0
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device


# NOTE(review): the mangled source assigned a bare `False` here; in the
# upstream test file this disables TF32 matmuls — confirm against the repo.
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    # Fast tests are not implemented for this pipeline.
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load_works(self):
        """Save/load after removing the text unet must not change outputs."""
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_versatile_diffusion_text2img(self):
        """fp16 generation must match the recorded reference slice."""
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
33
from scipy.stats import pearsonr

import datasets


_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""


_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted class labels, as returned by a model.
    references (`list` of `int`): Ground truth labels.
    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.

Returns:
    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.

Examples:

    Example 1-A simple example using only predictions and references.
        >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
        >>> print(round(results['pearsonr'], 2))
        -0.74

    Example 2-The same as Example 1, but that also returns the `p-value`.
        >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
        >>> print(sorted(list(results.keys())))
        ['p-value', 'pearsonr']
        >>> print(round(results['pearsonr'], 2))
        -0.74
        >>> print(round(results['p-value'], 2))
        0.15
"""


_CITATION = """
@article{2020SciPy-NMeth,
author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
          Haberland, Matt and Reddy, Tyler and Cournapeau, David and
          Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
          Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
          Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
          Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
          Kern, Robert and Larson, Eric and Carey, C J and
          Polat, Ilhan and Feng, Yu and Moore, Eric W. and
          {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
          Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
          Harris, Charles R. and Archibald, Anne M. and
          Ribeiro, Antonio H. and Pedregosa, Fabian and
          {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
          Computing in Python}},
journal = {Nature Methods},
year    = {2020},
volume  = {17},
pages   = {261--272},
adsurl  = {https://rdcu.be/b08Wh},
doi     = {10.1038/s41592-019-0686-2},
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _lowerCAmelCase(datasets.Metric):
    """Metric wrapper around `scipy.stats.pearsonr`."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        # Pearson's r is symmetric, so the argument order does not change the
        # coefficient; it is only reflected in scipy's (x, y) naming.
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
678
0
import argparse
import math
import os

import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel


def parse_args():
    """Parse command-line options for the quantized Stable Diffusion demo."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c",
        "--caption",
        type=str,
        default="robotic cat with wings",
        help="Text used to generate images.",
    )
    parser.add_argument(
        "-n",
        "--images_num",
        type=int,
        default=4,
        help="How much images to generate.",
    )
    parser.add_argument(
        "-s",
        "--seed",
        type=int,
        default=42,
        help="Seed for random process.",
    )
    parser.add_argument(
        "-ci",
        "--cuda_id",
        type=int,
        default=0,
        help="cuda_id.",
    )
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    """Paste `rows * cols` PIL images into a single grid image."""
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(
    pipeline,
    prompt="robotic cat with wings",
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    """Run the pipeline with a fixed seed; return (grid image, image list)."""
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images


args = parse_args()

# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# Disable the safety checker for this benchmark-style demo.
pipeline.safety_checker = lambda images, clip_input: (images, False)

if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    # Load the Intel Neural Compressor quantized unet if one was saved.
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)

grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
619
from manim import *


# NOTE(review): this scene's identifiers appear machine-mangled -- every local
# assignment targets ``lowerCAmelCase__`` and most call arguments were replaced
# with the undefined name ``__UpperCAmelCase`` -- so later references such as
# ``mem``, ``fill``, ``cpu_left_col_base`` or ``model_cpu_arr`` have no matching
# binding and the scene will not run as-is. The comments below record the intent
# that is still visible; restore the original names before rendering.
class _lowerCAmelCase ( _lowercase ):

    def __magic_name__( self ):
        """Animate checkpoint weights being offloaded from memory onto disk."""
        # Building blocks: a memory cell, a smaller "meta" cell, and a fill square.
        lowerCAmelCase__ : Tuple = Rectangle(height=0.5 , width=0.5 )
        lowerCAmelCase__ : Dict = Rectangle(height=0.25 , width=0.25 )
        lowerCAmelCase__ : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )

        # CPU: two columns of six memory cells with a caption, placed on the left.
        lowerCAmelCase__ : Optional[Any] = [mem.copy() for i in range(6 )]
        lowerCAmelCase__ : int = [mem.copy() for i in range(6 )]
        lowerCAmelCase__ : Optional[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : str = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : List[str] = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : int = Text('''CPU''' , font_size=24 )
        lowerCAmelCase__ : int = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__UpperCAmelCase )

        # GPU: four memory cells with a caption, just right of the CPU.
        lowerCAmelCase__ : Optional[int] = [mem.copy() for i in range(4 )]
        lowerCAmelCase__ : Tuple = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : Tuple = Text('''GPU''' , font_size=24 )
        lowerCAmelCase__ : int = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        gpu.move_to([-1, -1, 0] )
        self.add(__UpperCAmelCase )

        # Model: a row of six memory cells with a caption, on the right.
        lowerCAmelCase__ : int = [mem.copy() for i in range(6 )]
        lowerCAmelCase__ : List[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : Tuple = Text('''Model''' , font_size=24 )
        lowerCAmelCase__ : List[Any] = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        model.move_to([3, -1.0, 0] )
        self.add(__UpperCAmelCase )

        # Small filled targets marking where each model cell lives on the CPU.
        lowerCAmelCase__ : Dict = []
        lowerCAmelCase__ : List[str] = []
        lowerCAmelCase__ : Optional[Any] = []
        for i, rect in enumerate(__UpperCAmelCase ):
            rect.set_stroke(__UpperCAmelCase )
            lowerCAmelCase__ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase , opacity=0.7 )
            if i == 0:
                # First target anchors to the bottom-left corner of the CPU column.
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCAmelCase )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                # Fourth target starts a new row next to the very first one.
                cpu_target.next_to(model_cpu_arr[0] , direction=__UpperCAmelCase , buff=0.0 )
            else:
                cpu_target.next_to(model_cpu_arr[i - 1] , direction=__UpperCAmelCase , buff=0.0 )
            self.add(__UpperCAmelCase )
            model_cpu_arr.append(__UpperCAmelCase )
        self.add(*__UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase )

        # Loaded checkpoint: six cells with a caption, above the model.
        lowerCAmelCase__ : Any = [mem.copy() for i in range(6 )]
        lowerCAmelCase__ : Optional[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : Any = Text('''Loaded Checkpoint''' , font_size=24 )
        lowerCAmelCase__ : Optional[Any] = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        checkpoint.move_to([3, 0.5, 0] )
        self.add(__UpperCAmelCase )

        # Fill each checkpoint cell and mirror it onto a CPU cell: the first
        # five go to the left column, the remainder to the right column.
        lowerCAmelCase__ : Dict = []
        lowerCAmelCase__ : str = []
        for i, rect in enumerate(__UpperCAmelCase ):
            lowerCAmelCase__ : Union[str, Any] = fill.copy().set_fill(__UpperCAmelCase , opacity=0.7 )
            target.move_to(__UpperCAmelCase )
            ckpt_arr.append(__UpperCAmelCase )
            lowerCAmelCase__ : List[str] = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5] )
            ckpt_cpu_arr.append(__UpperCAmelCase )
        self.add(*__UpperCAmelCase , *__UpperCAmelCase )

        # Legend (key) in the top-left corner.
        lowerCAmelCase__ : Optional[Any] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        lowerCAmelCase__ : List[Any] = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(__UpperCAmelCase , __UpperCAmelCase )
        lowerCAmelCase__ : List[str] = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
        blue_text.next_to(__UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(__UpperCAmelCase )

        # Step caption shown at the top of the frame.
        lowerCAmelCase__ : str = MarkupText(
            f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , )
        step_a.move_to([2, 2, 0] )

        # Disk: two columns of six meta cells with a caption, bottom-left.
        lowerCAmelCase__ : Optional[Any] = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase__ : Dict = [meta_mem.copy() for i in range(6 )]
        lowerCAmelCase__ : Union[str, Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : Dict = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : str = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
        lowerCAmelCase__ : List[str] = Text('''Disk''' , font_size=24 )
        lowerCAmelCase__ : Any = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
        disk.move_to([-4.0, -1.25, 0] )
        self.play(Write(__UpperCAmelCase , run_time=3 ) , Write(__UpperCAmelCase , run_time=1 ) , Create(__UpperCAmelCase , run_time=1 ) )

        # Animate the checkpoint cells shrinking down onto the disk cells.
        lowerCAmelCase__ : str = []
        for i, rect in enumerate(__UpperCAmelCase ):
            lowerCAmelCase__ : Dict = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
            animations.append(MoveToTarget(__UpperCAmelCase , run_time=1.5 ) )
        self.play(*__UpperCAmelCase )
        self.play(FadeOut(__UpperCAmelCase ) )

        # Second step caption, then fade the checkpoint away entirely.
        lowerCAmelCase__ : int = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__UpperCAmelCase , run_time=3 ) )
        self.play(
            FadeOut(__UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase ) , )
        self.wait()
678
0
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    # Slow (sentencepiece) tokenizer is unavailable in this environment.
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4_0_9_6,
    "google/bigbird-roberta-large": 4_0_9_6,
    "google/bigbird-base-trivia-itc": 4_0_9_6,
}

SPIECE_UNDERLINE = "▁"


class _lowerCamelCase(PreTrainedTokenizerFast):
    """Fast BigBird tokenizer backed by HuggingFace's *tokenizers* library.

    Wraps a pre-built ``tokenizer.json`` and optionally keeps a reference to
    the sentencepiece vocab file so the slow tokenizer can be re-saved.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    # Token ids prepended to every encoded sequence (none by default).
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        # Normalize every special token given as a plain string into an
        # AddedToken so stripping behavior is explicit.
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # Saving the slow tokenizer requires the sentencepiece model file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add BigBird special tokens: ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 elsewhere.

        Raises:
            ValueError: if a second sequence is supplied together with
                ``already_has_special_tokens=True``.
        """
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token-type ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece vocab file into ``save_directory``.

        Raises:
            ValueError: if this fast tokenizer was built without a vocab file.
        """
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
64
import collections
import os
import re
from pathlib import Path


# Root of the transformers source tree whose __init__.py files are checked.
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available() -- note the trailing () adds an empty second group,
# so findall() yields tuples and the backend name is element [0].
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Return the backend key ("torch", "tf_and_torch", ...) of an
    ``if not is_xxx_available()`` line, or None if the line is not one."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """Read an __init__.py and return two dicts (backend -> object names):
    one built from ``_import_structure``, one from the TYPE_CHECKING branch.

    Returns None when the init is not structured around ``_import_structure``.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of an init and return a list of error strings."""

    def find_duplicates(seq):
        # Names registered more than once in the same backend bucket.
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """Walk the source tree and raise if any __init__.py has mismatched halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """Return the list of transformers submodule names found on disk."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


# Submodules intentionally absent from the main init's _import_structure.
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]


def check_submodules():
    """Raise if a submodule on disk is not registered in the main init."""
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentiall re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registed in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
678
0
"""simple docstring""" import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def snake_case ( self ): __lowerCAmelCase = tempfile.mkdtemp() __lowerCAmelCase = BlipImageProcessor() __lowerCAmelCase = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" ) __lowerCAmelCase = BlipProcessor(__UpperCAmelCase , __UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) def snake_case ( self , **__a ): return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).tokenizer def snake_case ( self , **__a ): return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).image_processor def snake_case ( self ): shutil.rmtree(self.tmpdirname ) def snake_case ( self ): __lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowerCAmelCase = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def snake_case ( self ): __lowerCAmelCase = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowerCAmelCase = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) __lowerCAmelCase = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 ) __lowerCAmelCase = BlipProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __UpperCAmelCase ) 
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __UpperCAmelCase ) def snake_case ( self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = BlipProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = image_processor(__UpperCAmelCase , return_tensors="np" ) __lowerCAmelCase = processor(images=__UpperCAmelCase , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def snake_case ( self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = BlipProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = processor(text=__UpperCAmelCase ) __lowerCAmelCase = tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def snake_case ( self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = BlipProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = processor(text=__UpperCAmelCase , images=__UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] ) # test if it raises when no input is passed with pytest.raises(__UpperCAmelCase ): processor() def snake_case ( self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = BlipProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __lowerCAmelCase = [[1, 4, 
5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowerCAmelCase = processor.batch_decode(__UpperCAmelCase ) __lowerCAmelCase = tokenizer.batch_decode(__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def snake_case ( self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = BlipProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = processor(text=__UpperCAmelCase , images=__UpperCAmelCase ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
636
import math
import os
import unittest

from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        MegatronBertForCausalLM,
        MegatronBertForMaskedLM,
        MegatronBertForMultipleChoice,
        MegatronBertForNextSentencePrediction,
        MegatronBertForPreTraining,
        MegatronBertForQuestionAnswering,
        MegatronBertForSequenceClassification,
        MegatronBertForTokenClassification,
        MegatronBertModel,
    )


class MegatronBertModelTester:
    """Builds tiny MegatronBert configs/inputs and checks each head's output shapes.

    The class must be named ``MegatronBertModelTester`` and its checker methods must
    carry distinct ``create_and_check_*`` names: the test class below instantiates
    and calls them by name (the obfuscated version named every method
    ``__magic_name__``, so only the last definition survived).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, input_mask, sequence_labels,
        token_labels, choice_labels); optional pieces are None when disabled."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise the three supported calling conventions.
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Replicate each example num_choices times along a new dim 1.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fp16 = True

    # test_resize_embeddings = False
    test_head_masking = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Add dummy pretraining labels for models that require them."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)


def _long_tensor(tok_lst):
    """Wrap a nested token list as a torch.long tensor on the test device.

    The obfuscated version passed the token list itself as ``device=``, which
    could never work; the device must be ``torch_device``.
    """
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4


@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        # Spot-check the top-left 3x3 corner of the hidden states.
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
678
0
import inspect
import unittest

from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTMSNForImageClassification, ViTMSNModel
    from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class ViTMSNModelTester:
    """Builds tiny ViT-MSN configs/inputs and checks backbone/classifier outputs.

    Methods carry their real names (the obfuscated version named every method
    ``a``, so only the last definition survived and the test class's calls to
    ``prepare_config_and_inputs`` etc. failed).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        # f-strings restored: the obfuscated prints used plain strings, so the
        # braces were never interpolated.
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            # Load each checkpoint by its name (the obfuscated version passed the
            # wrong variable to from_pretrained).
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
49
import warnings from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""", # See all BART models at https://huggingface.co/models?filter=bart } class _lowerCAmelCase ( _lowercase ): A__ = 'bart' A__ = ['past_key_values'] A__ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self , __UpperCAmelCase=5_0265 , __UpperCAmelCase=1024 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase="gelu" , __UpperCAmelCase=1024 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=0.0 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=3 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=2 , **__UpperCAmelCase , ): lowerCAmelCase__ : Union[str, Any] = vocab_size lowerCAmelCase__ : Optional[Any] = max_position_embeddings lowerCAmelCase__ : int = d_model lowerCAmelCase__ : str = encoder_ffn_dim lowerCAmelCase__ : Any = encoder_layers lowerCAmelCase__ : Dict = encoder_attention_heads lowerCAmelCase__ : Union[str, Any] = decoder_ffn_dim lowerCAmelCase__ : Union[str, Any] = decoder_layers lowerCAmelCase__ : Any = decoder_attention_heads lowerCAmelCase__ : Tuple = dropout lowerCAmelCase__ : Any = attention_dropout lowerCAmelCase__ : Any = activation_dropout lowerCAmelCase__ : Optional[Any] 
= activation_function lowerCAmelCase__ : Union[str, Any] = init_std lowerCAmelCase__ : Union[str, Any] = encoder_layerdrop lowerCAmelCase__ : int = decoder_layerdrop lowerCAmelCase__ : Optional[int] = classifier_dropout lowerCAmelCase__ : str = use_cache lowerCAmelCase__ : int = encoder_layers lowerCAmelCase__ : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( num_labels=__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , forced_eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , ) # ensure backward compatibility for BART CNN models if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , __UpperCAmelCase ): lowerCAmelCase__ : str = self.bos_token_id warnings.warn( f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """ '''The config can simply be saved and uploaded again to be fixed.''' ) class _lowerCAmelCase ( _lowercase ): @property def __magic_name__( self ): if self.task in ["default", "seq2seq-lm"]: lowerCAmelCase__ : Tuple = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: lowerCAmelCase__ : Any = {0: '''batch'''} lowerCAmelCase__ : Tuple = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: lowerCAmelCase__ : Optional[int] = {0: '''batch''', 1: '''decoder_sequence'''} lowerCAmelCase__ : Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. 
lowerCAmelCase__ : List[str] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.num_layers for i in range(__UpperCAmelCase ): lowerCAmelCase__ : int = {0: '''batch''', 2: '''past_sequence + sequence'''} lowerCAmelCase__ : Union[str, Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} else: lowerCAmelCase__ : List[str] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property def __magic_name__( self ): if self.task in ["default", "seq2seq-lm"]: lowerCAmelCase__ : Dict = super().outputs else: lowerCAmelCase__ : Any = super(__UpperCAmelCase , self ).outputs if self.use_past: lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.num_layers for i in range(__UpperCAmelCase ): lowerCAmelCase__ : List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} lowerCAmelCase__ : Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): lowerCAmelCase__ : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # Generate decoder inputs lowerCAmelCase__ : int = seq_length if not self.use_past else 1 lowerCAmelCase__ : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : 
Optional[int] = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()} lowerCAmelCase__ : str = dict(**__UpperCAmelCase , **__UpperCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = common_inputs['''input_ids'''].shape lowerCAmelCase__ : List[str] = common_inputs['''decoder_input_ids'''].shape[1] lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.num_attention_heads lowerCAmelCase__ : Any = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCAmelCase__ : List[Any] = decoder_seq_length + 3 lowerCAmelCase__ : Any = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) lowerCAmelCase__ : Any = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase )] , dim=1 ) lowerCAmelCase__ : List[str] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered lowerCAmelCase__ , lowerCAmelCase__ : Any = self.num_layers lowerCAmelCase__ : Union[str, Any] = min(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Any = max(__UpperCAmelCase , __UpperCAmelCase ) - min_num_layers lowerCAmelCase__ : Dict = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(__UpperCAmelCase ): common_inputs["past_key_values"].append( ( torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), ) ) # TODO: test this. 
lowerCAmelCase__ : Any = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(__UpperCAmelCase , __UpperCAmelCase ): common_inputs["past_key_values"].append((torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) ) return common_inputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): lowerCAmelCase__ : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowerCAmelCase__ , lowerCAmelCase__ : Any = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values lowerCAmelCase__ : List[str] = seqlen + 2 lowerCAmelCase__ , lowerCAmelCase__ : int = self.num_layers lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.num_attention_heads lowerCAmelCase__ : Optional[Any] = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCAmelCase__ : Optional[Any] = common_inputs['''attention_mask'''].dtype lowerCAmelCase__ : List[Any] = torch.cat( [common_inputs['''attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 ) lowerCAmelCase__ : List[Any] = [ (torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(__UpperCAmelCase ) ] return common_inputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX lowerCAmelCase__ : Tuple = compute_effective_axis_dimension( __UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowerCAmelCase__ : Optional[Any] = tokenizer.num_special_tokens_to_add(__UpperCAmelCase ) lowerCAmelCase__ : int = compute_effective_axis_dimension( __UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCAmelCase ) # Generate dummy inputs according to compute batch and sequence lowerCAmelCase__ : Optional[int] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size lowerCAmelCase__ : str = dict(tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase ) ) return common_inputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): if self.task in ["default", "seq2seq-lm"]: lowerCAmelCase__ : Union[str, Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) elif self.task == "causal-lm": lowerCAmelCase__ : List[str] = self._generate_dummy_inputs_for_causal_lm( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) else: lowerCAmelCase__ : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) return common_inputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): if self.task in ["default", "seq2seq-lm"]: lowerCAmelCase__ 
: Any = super()._flatten_past_key_values_(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) else: lowerCAmelCase__ : Dict = super(__UpperCAmelCase , self )._flatten_past_key_values_( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
678
0
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}


class TableTransformerConfig(PretrainedConfig):
    """Configuration for a Table Transformer (DETR-style detection) model.

    Stores the backbone choice, transformer encoder/decoder sizes, and the
    Hungarian-matcher / loss weights used during training.
    """

    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    # PretrainedConfig-style aliases: generic attribute name -> concrete field.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        # The two backbone mechanisms are mutually exclusive: either a timm
        # backbone (string name) or an explicit HF backbone config.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                # Re-hydrate a plain dict (e.g. from a saved JSON) into a config object.
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        """Alias for the encoder attention-head count (see `attribute_map`)."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """Alias for the transformer width `d_model` (see `attribute_map`)."""
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    """ONNX export description for Table Transformer."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes for the two model inputs.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the exported model's outputs.
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
447
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class SEWDConfig(PretrainedConfig):
    """Configuration for a SEW-D speech model (convolutional feature extractor
    followed by a disentangled-attention transformer encoder)."""

    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        # Copy to lists so the stored config is JSON-serializable and mutable.
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        # The three per-layer conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect.'''
                '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
                f"""but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"""
                f"""= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."""
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        # Total time-domain downsampling factor of the conv feature extractor
        # (product of all stride values).
        return functools.reduce(operator.mul, self.conv_stride, 1)
678
0
'''simple docstring'''
import os
import zipfile

import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links


def get_daily_ci_runs(token, num_runs=7):
    """Return the latest `num_runs` scheduled runs of the daily CI workflow on `main`."""
    headers = None
    if token is not None:
        headers = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f"""Bearer {token}"""}

    # The id of a workflow (not of a workflow run)
    workflow_id = '''636036'''

    url = f"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Return the id of the most recent *completed* daily CI workflow run, or None."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        # Runs are returned newest-first; take the first completed one.
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run['''id''']
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the named artifacts of the latest completed daily CI run into `output_dir`."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # NOTE: `worflow_run_id` (sic) is the actual parameter name of the helper.
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download the named artifacts and return their text contents.

    Returns:
        dict mapping artifact name -> {zip member filename -> decoded file text}.
        Artifacts that were not downloaded are omitted.
    """
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"""{artifact_name}.zip""")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
78
import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase_ = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""") @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( _lowercase , unittest.TestCase ): A__ = PegasusTokenizer A__ = PegasusTokenizerFast A__ = True A__ = True def __magic_name__( self ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ : Union[str, Any] = PegasusTokenizer(__UpperCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __magic_name__( self ): return PegasusTokenizer.from_pretrained('''google/pegasus-large''' ) def __magic_name__( self , **__UpperCAmelCase ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def __magic_name__( self , __UpperCAmelCase ): return ("This is a test", "This is a test") def __magic_name__( self ): lowerCAmelCase__ : Optional[Any] = '''</s>''' lowerCAmelCase__ : Optional[int] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Tuple = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<pad>''' ) self.assertEqual(vocab_keys[1] , '''</s>''' ) self.assertEqual(vocab_keys[-1] , '''v''' ) self.assertEqual(len(__UpperCAmelCase ) , 1103 ) def __magic_name__( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1103 ) def __magic_name__( self ): lowerCAmelCase__ : int = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ : Tuple = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ : int 
= ( '''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important''' ''' </s> <pad> <pad> <pad>''' ) lowerCAmelCase__ : Any = rust_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] lowerCAmelCase__ : Dict = py_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Any = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word lowerCAmelCase__ : List[str] = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.''' lowerCAmelCase__ : Tuple = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1] lowerCAmelCase__ : Tuple = tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Dict = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_6103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1024 lowerCAmelCase__ : str = '''To ensure a smooth flow of bank resolutions.''' lowerCAmelCase__ : int = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1] lowerCAmelCase__ : List[Any] = tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def __magic_name__( self ): lowerCAmelCase__ : Optional[int] = ['''This is going to be way too long.''' * 150, '''short example'''] 
lowerCAmelCase__ : List[str] = ['''not super long but more than 5 tokens''', '''tiny'''] lowerCAmelCase__ : Tuple = self._large_tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) lowerCAmelCase__ : Optional[int] = self._large_tokenizer( text_target=__UpperCAmelCase , max_length=5 , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 1024) assert batch.attention_mask.shape == (2, 1024) assert targets["input_ids"].shape == (2, 5) assert len(__UpperCAmelCase ) == 2 # input_ids, attention_mask. @slow def __magic_name__( self ): # fmt: off lowerCAmelCase__ : Optional[int] = {'''input_ids''': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCAmelCase , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , ) @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( _lowercase , unittest.TestCase ): A__ = PegasusTokenizer A__ = PegasusTokenizerFast A__ = True A__ = True def __magic_name__( self ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ : List[Any] = PegasusTokenizer(__UpperCAmelCase , offset=0 , mask_token_sent=__UpperCAmelCase , mask_token='''[MASK]''' ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __magic_name__( self ): return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' ) def __magic_name__( self , **__UpperCAmelCase ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def __magic_name__( self , __UpperCAmelCase ): return ("This is a test", "This is a test") def __magic_name__( self ): lowerCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ : int = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ : str = ( '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>''' ''' <pad> 
<pad> <pad>''' ) lowerCAmelCase__ : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] lowerCAmelCase__ : int = py_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) @require_torch def __magic_name__( self ): lowerCAmelCase__ : Optional[Any] = ['''This is going to be way too long.''' * 1000, '''short example'''] lowerCAmelCase__ : int = ['''not super long but more than 5 tokens''', '''tiny'''] lowerCAmelCase__ : Tuple = self._large_tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) lowerCAmelCase__ : Tuple = self._large_tokenizer( text_target=__UpperCAmelCase , max_length=5 , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 4096) assert batch.attention_mask.shape == (2, 4096) assert targets["input_ids"].shape == (2, 5) assert len(__UpperCAmelCase ) == 2 # input_ids, attention_mask. def __magic_name__( self ): lowerCAmelCase__ : List[str] = ( '''This is an example string that is used to test the original TF implementation against the HF''' ''' implementation''' ) lowerCAmelCase__ : Union[str, Any] = self._large_tokenizer(__UpperCAmelCase ).input_ids self.assertListEqual( __UpperCAmelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
678
0
# Project Euler problem 30: sum of all numbers that can be written as the sum
# of the fifth powers of their decimal digits.
# Lookup table: decimal digit (as a one-character string) -> digit**5.
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """Return the sum of the fifth powers of the decimal digits of ``number``."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Return the sum of all numbers equal to the fifth-power sum of their digits.

    One-digit numbers are excluded (search starts at 1000), and 1,000,000 is a
    safe upper bound because even seven nines give only 7 * 9**5 < 1,000,000.
    """
    return sum(
        number
        for number in range(1_000, 1_000_000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
283
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { """naver-clova-ix/donut-base""": """https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json""", # See all Donut models at https://huggingface.co/models?filter=donut-swin } class _lowerCAmelCase ( _lowercase ): A__ = 'donut-swin' A__ = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self , __UpperCAmelCase=224 , __UpperCAmelCase=4 , __UpperCAmelCase=3 , __UpperCAmelCase=96 , __UpperCAmelCase=[2, 2, 6, 2] , __UpperCAmelCase=[3, 6, 12, 24] , __UpperCAmelCase=7 , __UpperCAmelCase=4.0 , __UpperCAmelCase=True , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase="gelu" , __UpperCAmelCase=False , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-5 , **__UpperCAmelCase , ): super().__init__(**__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = image_size lowerCAmelCase__ : List[str] = patch_size lowerCAmelCase__ : int = num_channels lowerCAmelCase__ : Optional[Any] = embed_dim lowerCAmelCase__ : int = depths lowerCAmelCase__ : Dict = len(__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = num_heads lowerCAmelCase__ : Dict = window_size lowerCAmelCase__ : str = mlp_ratio lowerCAmelCase__ : Optional[int] = qkv_bias lowerCAmelCase__ : Any = hidden_dropout_prob lowerCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob lowerCAmelCase__ : List[str] = drop_path_rate lowerCAmelCase__ : Tuple = hidden_act lowerCAmelCase__ : List[str] = use_absolute_embeddings lowerCAmelCase__ : Dict = layer_norm_eps lowerCAmelCase__ : Any = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model lowerCAmelCase__ : List[Any] = int(embed_dim * 2 ** (len(__UpperCAmelCase ) - 1) )
678
0
"""simple docstring""" def __snake_case ( __A ) -> int: if a < 0: raise ValueError("""Input value must be a positive integer""" ) elif isinstance(__A ,__A ): raise TypeError("""Input value must be a \'int\' type""" ) return bin(__A ).count("""1""" ) if __name__ == "__main__": import doctest doctest.testmod()
607
lowerCAmelCase_ = { """meter""": """m""", """kilometer""": """km""", """megametre""": """Mm""", """gigametre""": """Gm""", """terametre""": """Tm""", """petametre""": """Pm""", """exametre""": """Em""", """zettametre""": """Zm""", """yottametre""": """Ym""", } # Exponent of the factor(meter) lowerCAmelCase_ = { """m""": 0, """km""": 3, """Mm""": 6, """Gm""": 9, """Tm""": 12, """Pm""": 15, """Em""": 18, """Zm""": 21, """Ym""": 24, } def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> float: lowerCAmelCase__ : List[str] = from_type.lower().strip('''s''' ) lowerCAmelCase__ : List[str] = to_type.lower().strip('''s''' ) lowerCAmelCase__ : str = UNIT_SYMBOL.get(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : Any = UNIT_SYMBOL.get(UpperCamelCase , UpperCamelCase ) if from_sanitized not in METRIC_CONVERSION: lowerCAmelCase__ : Tuple = ( F"""Invalid 'from_type' value: {from_type!r}.\n""" F"""Conversion abbreviations are: {", ".join(UpperCamelCase )}""" ) raise ValueError(UpperCamelCase ) if to_sanitized not in METRIC_CONVERSION: lowerCAmelCase__ : List[Any] = ( F"""Invalid 'to_type' value: {to_type!r}.\n""" F"""Conversion abbreviations are: {", ".join(UpperCamelCase )}""" ) raise ValueError(UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = METRIC_CONVERSION[from_sanitized] lowerCAmelCase__ : Optional[Any] = METRIC_CONVERSION[to_sanitized] lowerCAmelCase__ : int = 1 if from_exponent > to_exponent: lowerCAmelCase__ : List[str] = from_exponent - to_exponent else: lowerCAmelCase__ : Dict = -(to_exponent - from_exponent) return value * pow(10 , UpperCamelCase ) if __name__ == "__main__": from doctest import testmod testmod()
678
0
"""simple docstring""" import requests __snake_case = """https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey=""" def __lowerCAmelCase ( lowercase : Dict ) -> None: """simple docstring""" snake_case : Union[str, Any] = requests.get(_NEWS_API + bbc_news_api_key ).json() # each article in the list is a dict for i, article in enumerate(bbc_news_page["articles"] , 1 ): print(F'{i}.) {article["title"]}' ) if __name__ == "__main__": fetch_bbc_news(bbc_news_api_key="""<Your BBC News API key goes here>""")
178
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _lowerCAmelCase : @staticmethod def __magic_name__( *__UpperCAmelCase , **__UpperCAmelCase ): pass @is_pipeline_test @require_vision class _lowerCAmelCase ( unittest.TestCase ): @require_torch def __magic_name__( self ): lowerCAmelCase__ : int = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , ) lowerCAmelCase__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase__ : List[str] = image_classifier(__UpperCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. 
self.assertIn( nested_simplify(__UpperCAmelCase ) , [ [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}], [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}], ] , ) lowerCAmelCase__ : List[Any] = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], ] , ) @require_tf def __magic_name__( self ): lowerCAmelCase__ : List[Any] = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' ) lowerCAmelCase__ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase__ : str = image_classifier(__UpperCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': 
'''c'''}] , ) lowerCAmelCase__ : List[Any] = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], ] , ) @slow @require_torch def __magic_name__( self ): lowerCAmelCase__ : str = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , ) # This is an image of 2 cats with remotes and no planes lowerCAmelCase__ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase__ : str = image_classifier(__UpperCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) lowerCAmelCase__ : Tuple = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [ {'''score''': 0.511, '''label''': 
'''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , ) @slow @require_tf def __magic_name__( self ): lowerCAmelCase__ : Union[str, Any] = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' ) # This is an image of 2 cats with remotes and no planes lowerCAmelCase__ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase__ : Union[str, Any] = image_classifier(__UpperCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) lowerCAmelCase__ : Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , )
678
0
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
323
lowerCAmelCase_ = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]: # Return True if there is node that has not iterated. lowerCAmelCase__ : Optional[int] = [False] * len(UpperCamelCase ) lowerCAmelCase__ : Tuple = [s] lowerCAmelCase__ : Dict = True while queue: lowerCAmelCase__ : int = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = True lowerCAmelCase__ : Optional[int] = u return visited[t] def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[Any]: lowerCAmelCase__ : Any = [-1] * (len(UpperCamelCase )) lowerCAmelCase__ : List[Any] = 0 lowerCAmelCase__ : Tuple = [] lowerCAmelCase__ : Optional[int] = [i[:] for i in graph] # Record original cut, copy. while bfs(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): lowerCAmelCase__ : List[Any] = float('''Inf''' ) lowerCAmelCase__ : Dict = sink while s != source: # Find the minimum value in select path lowerCAmelCase__ : Tuple = min(UpperCamelCase , graph[parent[s]][s] ) lowerCAmelCase__ : List[Any] = parent[s] max_flow += path_flow lowerCAmelCase__ : List[Any] = sink while v != source: lowerCAmelCase__ : Dict = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow lowerCAmelCase__ : Optional[Any] = parent[v] for i in range(len(UpperCamelCase ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
678
0
'''simple docstring''' from timeit import timeit def lowercase ( __magic_name__ ): '''simple docstring''' if number < 0: raise ValueError("the value of input must not be negative" ) UpperCAmelCase : int = 0 while number: number &= number - 1 result += 1 return result def lowercase ( __magic_name__ ): '''simple docstring''' if number < 0: raise ValueError("the value of input must not be negative" ) UpperCAmelCase : Optional[Any] = 0 while number: if number % 2 == 1: result += 1 number >>= 1 return result def lowercase ( ): '''simple docstring''' def do_benchmark(__magic_name__ ) -> None: UpperCAmelCase : Optional[int] = "import __main__ as z" print(F"Benchmark when {number = }:" ) print(F"{get_set_bits_count_using_modulo_operator(__magic_name__ ) = }" ) UpperCAmelCase : Union[str, Any] = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=__magic_name__ ) print(F"timeit() runs in {timing} seconds" ) print(F"{get_set_bits_count_using_brian_kernighans_algorithm(__magic_name__ ) = }" ) UpperCAmelCase : str = timeit( "z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=__magic_name__ , ) print(F"timeit() runs in {timing} seconds" ) for number in (25, 37, 58, 0): do_benchmark(__magic_name__ ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
679
'''simple docstring''' import os import tempfile import unittest import uuid from pathlib import Path from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available if is_torch_available(): import torch if is_soundfile_availble(): import soundfile as sf if is_vision_available(): from PIL import Image def lowercase ( __magic_name__="" ): '''simple docstring''' UpperCAmelCase : Dict = tempfile.mkdtemp() return os.path.join(__magic_name__ , str(uuid.uuida() ) + suffix ) @require_soundfile @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = torch.rand(1_2 , dtype=torch.floataa ) - 0.5 UpperCAmelCase : int = AgentAudio(snake_case ) UpperCAmelCase : str = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(snake_case , agent_type.to_raw() , atol=1e-4 ) ) del agent_type # Ensure the path remains even after the object deletion self.assertTrue(os.path.exists(snake_case ) ) # Ensure that the file contains the same value as the original tensor UpperCAmelCase , UpperCAmelCase : str = sf.read(snake_case ) self.assertTrue(torch.allclose(snake_case , torch.tensor(snake_case ) , atol=1e-4 ) ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = torch.rand(1_2 , dtype=torch.floataa ) - 0.5 UpperCAmelCase : Any = get_new_path(suffix=".wav" ) sf.write(snake_case , snake_case , 1_6_0_0_0 ) UpperCAmelCase : Optional[Any] = AgentAudio(snake_case ) self.assertTrue(torch.allclose(snake_case , agent_type.to_raw() , atol=1e-4 ) ) self.assertEqual(agent_type.to_string() , snake_case ) @require_vision @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self 
): '''simple docstring''' UpperCAmelCase : Optional[Any] = torch.randint(0 , 2_5_6 , (6_4, 6_4, 3) ) UpperCAmelCase : Tuple = AgentImage(snake_case ) UpperCAmelCase : Tuple = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(snake_case , agent_type._tensor , atol=1e-4 ) ) self.assertIsInstance(agent_type.to_raw() , Image.Image ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(snake_case ) ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" UpperCAmelCase : Any = Image.open(snake_case ) UpperCAmelCase : List[str] = AgentImage(snake_case ) self.assertTrue(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(snake_case ) ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" UpperCAmelCase : Dict = Image.open(snake_case ) UpperCAmelCase : int = AgentImage(snake_case ) self.assertFalse(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(snake_case ) ) class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = "Hey!" UpperCAmelCase : Tuple = AgentText(snake_case ) self.assertEqual(snake_case , agent_type.to_string() ) self.assertEqual(snake_case , agent_type.to_raw() ) self.assertEqual(snake_case , snake_case )
679
1
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: a : List[str] = None a : Union[str, Any] = logging.get_logger(__name__) a : Any = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} a : Dict = { "vocab_file": { "facebook/nllb-200-distilled-600M": ( "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model" ), }, "tokenizer_file": { "facebook/nllb-200-distilled-600M": ( "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json" ), }, } a : str = { "facebook/nllb-large-en-ro": 10_24, "facebook/nllb-200-distilled-600M": 10_24, } # fmt: off a : str = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", 
"kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"] class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ : str = ["input_ids", "attention_mask"] SCREAMING_SNAKE_CASE__ : str = NllbTokenizer SCREAMING_SNAKE_CASE__ : List[int] = [] SCREAMING_SNAKE_CASE__ : List[int] = [] def __init__( self , snake_case=None , snake_case=None , snake_case="<s>" , snake_case="</s>" , snake_case="</s>" , snake_case="<s>" , 
snake_case="<unk>" , snake_case="<pad>" , snake_case="<mask>" , snake_case=None , snake_case=None , snake_case=None , snake_case=False , **snake_case , ): '''simple docstring''' UpperCAmelCase : Optional[Any] = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else mask_token UpperCAmelCase : int = legacy_behaviour super().__init__( vocab_file=snake_case , tokenizer_file=snake_case , bos_token=snake_case , eos_token=snake_case , sep_token=snake_case , cls_token=snake_case , unk_token=snake_case , pad_token=snake_case , mask_token=snake_case , src_lang=snake_case , tgt_lang=snake_case , additional_special_tokens=snake_case , legacy_behaviour=snake_case , **snake_case , ) UpperCAmelCase : Optional[Any] = vocab_file UpperCAmelCase : Union[str, Any] = False if not self.vocab_file else True UpperCAmelCase : str = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} ) UpperCAmelCase : List[str] = { lang_code: self.convert_tokens_to_ids(snake_case ) for lang_code in FAIRSEQ_LANGUAGE_CODES } UpperCAmelCase : Union[str, Any] = src_lang if src_lang is not None else "eng_Latn" UpperCAmelCase : Dict = self.convert_tokens_to_ids(self._src_lang ) UpperCAmelCase : Any = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def A_ ( self ): '''simple docstring''' return self._src_lang @src_lang.setter def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : Tuple = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def A_ ( self , snake_case , snake_case = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def A_ ( self , snake_case , snake_case = None ): '''simple docstring''' UpperCAmelCase : List[str] = [self.sep_token_id] UpperCAmelCase : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def A_ ( self , snake_case , snake_case , snake_case , snake_case , **snake_case ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) UpperCAmelCase : Any = src_lang UpperCAmelCase : Any = self(snake_case , add_special_tokens=snake_case , return_tensors=snake_case , **snake_case ) UpperCAmelCase : List[Any] = self.convert_tokens_to_ids(snake_case ) UpperCAmelCase : Any = tgt_lang_id return inputs def A_ ( self , snake_case , snake_case = "eng_Latn" , snake_case = None , snake_case = 
"fra_Latn" , **snake_case , ): '''simple docstring''' UpperCAmelCase : Any = src_lang UpperCAmelCase : Union[str, Any] = tgt_lang return super().prepare_seqaseq_batch(snake_case , snake_case , **snake_case ) def A_ ( self ): '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def A_ ( self ): '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : Optional[int] = self.convert_tokens_to_ids(snake_case ) if self.legacy_behaviour: UpperCAmelCase : Optional[int] = [] UpperCAmelCase : Union[str, Any] = [self.eos_token_id, self.cur_lang_code] else: UpperCAmelCase : Dict = [self.cur_lang_code] UpperCAmelCase : Union[str, Any] = [self.eos_token_id] UpperCAmelCase : List[str] = self.convert_ids_to_tokens(self.prefix_tokens ) UpperCAmelCase : List[str] = self.convert_ids_to_tokens(self.suffix_tokens ) UpperCAmelCase : Optional[int] = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : Dict = self.convert_tokens_to_ids(snake_case ) if self.legacy_behaviour: UpperCAmelCase : Optional[int] = [] UpperCAmelCase : int = [self.eos_token_id, self.cur_lang_code] else: UpperCAmelCase : List[Any] = [self.cur_lang_code] UpperCAmelCase : Union[str, Any] = [self.eos_token_id] UpperCAmelCase : str = self.convert_ids_to_tokens(self.prefix_tokens ) UpperCAmelCase : Optional[Any] = self.convert_ids_to_tokens(self.suffix_tokens ) UpperCAmelCase : List[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens 
) ) , ) def A_ ( self , snake_case , snake_case = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(snake_case ): logger.error(f"Vocabulary path ({save_directory}) should be a directory." ) return UpperCAmelCase : List[Any] = os.path.join( snake_case , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ): copyfile(self.vocab_file , snake_case ) return (out_vocab_file,)
679
'''simple docstring''' import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' def get_masked_lm_array(__magic_name__ ): UpperCAmelCase : Tuple = F"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE" UpperCAmelCase : List[str] = tf.train.load_variable(__magic_name__ , __magic_name__ ) if "kernel" in name: UpperCAmelCase : str = array.transpose() return torch.from_numpy(__magic_name__ ) def get_encoder_array(__magic_name__ ): UpperCAmelCase : List[Any] = F"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE" UpperCAmelCase : Optional[Any] = tf.train.load_variable(__magic_name__ , __magic_name__ ) if "kernel" in name: UpperCAmelCase : str = array.transpose() return torch.from_numpy(__magic_name__ ) def get_encoder_layer_array(__magic_name__ , __magic_name__ ): UpperCAmelCase : Union[str, Any] = F"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE" UpperCAmelCase : int = tf.train.load_variable(__magic_name__ , __magic_name__ ) if "kernel" in name: UpperCAmelCase : Optional[int] = array.transpose() return torch.from_numpy(__magic_name__ ) def get_encoder_attention_layer_array(__magic_name__ , __magic_name__ , __magic_name__ ): UpperCAmelCase : Tuple = F"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE" UpperCAmelCase : List[str] = tf.train.load_variable(__magic_name__ , __magic_name__ ) UpperCAmelCase : int = array.reshape(__magic_name__ ) if "kernel" in name: UpperCAmelCase : Optional[Any] = array.transpose() return torch.from_numpy(__magic_name__ ) print(F"Loading model based on config from {config_path}..." 
) UpperCAmelCase : Optional[Any] = BertConfig.from_json_file(__magic_name__ ) UpperCAmelCase : Optional[Any] = BertForMaskedLM(__magic_name__ ) # Layers for layer_index in range(0 , config.num_hidden_layers ): UpperCAmelCase : BertLayer = model.bert.encoder.layer[layer_index] # Self-attention UpperCAmelCase : BertSelfAttention = layer.attention.self UpperCAmelCase : List[Any] = get_encoder_attention_layer_array( __magic_name__ , "_query_dense/kernel" , self_attn.query.weight.data.shape ) UpperCAmelCase : Tuple = get_encoder_attention_layer_array( __magic_name__ , "_query_dense/bias" , self_attn.query.bias.data.shape ) UpperCAmelCase : int = get_encoder_attention_layer_array( __magic_name__ , "_key_dense/kernel" , self_attn.key.weight.data.shape ) UpperCAmelCase : Optional[int] = get_encoder_attention_layer_array( __magic_name__ , "_key_dense/bias" , self_attn.key.bias.data.shape ) UpperCAmelCase : Tuple = get_encoder_attention_layer_array( __magic_name__ , "_value_dense/kernel" , self_attn.value.weight.data.shape ) UpperCAmelCase : str = get_encoder_attention_layer_array( __magic_name__ , "_value_dense/bias" , self_attn.value.bias.data.shape ) # Self-attention Output UpperCAmelCase : BertSelfOutput = layer.attention.output UpperCAmelCase : str = get_encoder_attention_layer_array( __magic_name__ , "_output_dense/kernel" , self_output.dense.weight.data.shape ) UpperCAmelCase : Union[str, Any] = get_encoder_attention_layer_array( __magic_name__ , "_output_dense/bias" , self_output.dense.bias.data.shape ) UpperCAmelCase : str = get_encoder_layer_array(__magic_name__ , "_attention_layer_norm/gamma" ) UpperCAmelCase : List[str] = get_encoder_layer_array(__magic_name__ , "_attention_layer_norm/beta" ) # Intermediate UpperCAmelCase : BertIntermediate = layer.intermediate UpperCAmelCase : Dict = get_encoder_layer_array(__magic_name__ , "_intermediate_dense/kernel" ) UpperCAmelCase : Tuple = get_encoder_layer_array(__magic_name__ , "_intermediate_dense/bias" ) # Output 
UpperCAmelCase : BertOutput = layer.output UpperCAmelCase : Optional[Any] = get_encoder_layer_array(__magic_name__ , "_output_dense/kernel" ) UpperCAmelCase : Optional[Any] = get_encoder_layer_array(__magic_name__ , "_output_dense/bias" ) UpperCAmelCase : List[str] = get_encoder_layer_array(__magic_name__ , "_output_layer_norm/gamma" ) UpperCAmelCase : Any = get_encoder_layer_array(__magic_name__ , "_output_layer_norm/beta" ) # Embeddings UpperCAmelCase : int = get_encoder_array("_position_embedding_layer/embeddings" ) UpperCAmelCase : str = get_encoder_array("_type_embedding_layer/embeddings" ) UpperCAmelCase : Optional[Any] = get_encoder_array("_embedding_norm_layer/gamma" ) UpperCAmelCase : Any = get_encoder_array("_embedding_norm_layer/beta" ) # LM Head UpperCAmelCase : str = model.cls.predictions.transform UpperCAmelCase : List[Any] = get_masked_lm_array("dense/kernel" ) UpperCAmelCase : List[Any] = get_masked_lm_array("dense/bias" ) UpperCAmelCase : Optional[Any] = get_masked_lm_array("layer_norm/gamma" ) UpperCAmelCase : Union[str, Any] = get_masked_lm_array("layer_norm/beta" ) UpperCAmelCase : Optional[Any] = get_masked_lm_array("embedding_table" ) # Pooling UpperCAmelCase : str = BertPooler(config=__magic_name__ ) UpperCAmelCase : BertPooler = get_encoder_array("_pooler_layer/kernel" ) UpperCAmelCase : BertPooler = get_encoder_array("_pooler_layer/bias" ) # Export final model model.save_pretrained(__magic_name__ ) # Integration test - should load without any errors ;) UpperCAmelCase : Optional[int] = BertForMaskedLM.from_pretrained(__magic_name__ ) print(new_model.eval() ) print("Model conversion was done sucessfully!" ) if __name__ == "__main__": a : Tuple = argparse.ArgumentParser() parser.add_argument( "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path." ) parser.add_argument( "--bert_config_file", type=str, required=True, help="The config json file corresponding to the BERT model. 
This specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", type=str, required=True, help="Path to the output PyTorch model.", ) a : Any = parser.parse_args() convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
679
1
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. a : List[str] = {"LayoutLMv2Config", "LayoutLMv3Config"} @is_pipeline_test class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING SCREAMING_SNAKE_CASE__ : List[str] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: SCREAMING_SNAKE_CASE__ : Tuple = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: SCREAMING_SNAKE_CASE__ : List[Any] = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = pipeline( task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" ) UpperCAmelCase : Optional[Any] = text_classifier("This is great !" ) self.assertEqual(nested_simplify(snake_case ) , [{"label": "LABEL_0", "score": 0.504}] ) UpperCAmelCase : int = text_classifier("This is great !" , top_k=2 ) self.assertEqual( nested_simplify(snake_case ) , [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}] ) UpperCAmelCase : List[Any] = text_classifier(["This is great !", "This is bad"] , top_k=2 ) self.assertEqual( nested_simplify(snake_case ) , [ [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], ] , ) UpperCAmelCase : Optional[Any] = text_classifier("This is great !" 
, top_k=1 ) self.assertEqual(nested_simplify(snake_case ) , [{"label": "LABEL_0", "score": 0.504}] ) # Legacy behavior UpperCAmelCase : Optional[Any] = text_classifier("This is great !" , return_all_scores=snake_case ) self.assertEqual(nested_simplify(snake_case ) , [{"label": "LABEL_0", "score": 0.504}] ) UpperCAmelCase : int = text_classifier("This is great !" , return_all_scores=snake_case ) self.assertEqual( nested_simplify(snake_case ) , [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]] ) UpperCAmelCase : str = text_classifier(["This is great !", "Something else"] , return_all_scores=snake_case ) self.assertEqual( nested_simplify(snake_case ) , [ [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], ] , ) UpperCAmelCase : List[Any] = text_classifier(["This is great !", "Something else"] , return_all_scores=snake_case ) self.assertEqual( nested_simplify(snake_case ) , [ {"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_0", "score": 0.504}, ] , ) @require_torch def A_ ( self ): '''simple docstring''' import torch UpperCAmelCase : Any = pipeline( task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" , device=torch.device("cpu" ) , ) UpperCAmelCase : int = text_classifier("This is great !" ) self.assertEqual(nested_simplify(snake_case ) , [{"label": "LABEL_0", "score": 0.504}] ) @require_tf def A_ ( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = pipeline( task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="tf" ) UpperCAmelCase : Any = text_classifier("This is great !" 
) self.assertEqual(nested_simplify(snake_case ) , [{"label": "LABEL_0", "score": 0.504}] ) @slow @require_torch def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[int] = pipeline("text-classification" ) UpperCAmelCase : Any = text_classifier("This is great !" ) self.assertEqual(nested_simplify(snake_case ) , [{"label": "POSITIVE", "score": 1.0}] ) UpperCAmelCase : Any = text_classifier("This is bad !" ) self.assertEqual(nested_simplify(snake_case ) , [{"label": "NEGATIVE", "score": 1.0}] ) UpperCAmelCase : Dict = text_classifier("Birds are a type of animal" ) self.assertEqual(nested_simplify(snake_case ) , [{"label": "POSITIVE", "score": 0.988}] ) @slow @require_tf def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = pipeline("text-classification" , framework="tf" ) UpperCAmelCase : Union[str, Any] = text_classifier("This is great !" ) self.assertEqual(nested_simplify(snake_case ) , [{"label": "POSITIVE", "score": 1.0}] ) UpperCAmelCase : int = text_classifier("This is bad !" 
) self.assertEqual(nested_simplify(snake_case ) , [{"label": "NEGATIVE", "score": 1.0}] ) UpperCAmelCase : int = text_classifier("Birds are a type of animal" ) self.assertEqual(nested_simplify(snake_case ) , [{"label": "POSITIVE", "score": 0.988}] ) def A_ ( self , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : Optional[int] = TextClassificationPipeline(model=snake_case , tokenizer=snake_case ) return text_classifier, ["HuggingFace is in", "This is another test"] def A_ ( self , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : List[str] = text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 UpperCAmelCase : Any = "HuggingFace is in" UpperCAmelCase : Any = text_classifier(snake_case ) self.assertEqual(nested_simplify(snake_case ) , [{"label": ANY(snake_case ), "score": ANY(snake_case )}] ) self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() ) UpperCAmelCase : Union[str, Any] = ["HuggingFace is in ", "Paris is in France"] UpperCAmelCase : Any = text_classifier(snake_case ) self.assertEqual( nested_simplify(snake_case ) , [{"label": ANY(snake_case ), "score": ANY(snake_case )}, {"label": ANY(snake_case ), "score": ANY(snake_case )}] , ) self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() ) self.assertTrue(outputs[1]["label"] in model.config.idalabel.values() ) # Forcing to get all results with `top_k=None` # This is NOT the legacy format UpperCAmelCase : Optional[int] = text_classifier(snake_case , top_k=snake_case ) UpperCAmelCase : Dict = len(model.config.idalabel.values() ) self.assertEqual( nested_simplify(snake_case ) , [[{"label": ANY(snake_case ), "score": ANY(snake_case )}] * N, [{"label": ANY(snake_case ), "score": ANY(snake_case )}] * N] , ) UpperCAmelCase : List[str] = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"} UpperCAmelCase : List[str] = text_classifier(snake_case ) self.assertEqual( 
nested_simplify(snake_case ) , {"label": ANY(snake_case ), "score": ANY(snake_case )} , ) self.assertTrue(outputs["label"] in model.config.idalabel.values() ) # This might be used a text pair, but tokenizer + pipe interaction # makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this usage instead as it was outputting wrong outputs. UpperCAmelCase : Dict = [["HuggingFace is in ", "Paris is in France"]] with self.assertRaises(snake_case ): text_classifier(snake_case ) # This used to be valid for doing text pairs # We're keeping it working because of backward compatibility UpperCAmelCase : str = text_classifier([[["HuggingFace is in ", "Paris is in France"]]] ) self.assertEqual( nested_simplify(snake_case ) , [{"label": ANY(snake_case ), "score": ANY(snake_case )}] , ) self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
679
"""Check that the custom (lazy) inits define the same objects in their
``_import_structure`` dict and in their ``TYPE_CHECKING`` branch, and that
every top-level submodule is registered in the main transformers init.

The obfuscated original bound every regex constant and function to the
names ``a``/``lowercase`` while the call sites referenced
``_re_backend``/``find_backend``/``parse_init``/... -- the names used at the
call sites are restored here so the script actually runs.
"""
import collections
import importlib.util
import os
import re
from pathlib import Path

# Root of the transformers package, relative to the repository checkout.
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_structure["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
# (raw strings here fix the invalid-escape-sequence warnings of the original)
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Return the ``_and_``-joined, sorted backend names guarded on ``line``
    (e.g. ``"torch"`` for ``if not is_torch_available():``), or None if the
    line is not a backend guard."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """Parse a lazy init file.

    Returns a pair of dicts ``(import_dict_objects, type_hint_objects)``
    mapping backend name (``"none"`` for backend-free objects) to the list of
    object names declared, or None when ``init_file`` is a traditional init
    without an ``_import_structure``.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure.
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + "\""):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}

    # Let's continue with backend-specific objects in _import_structure.
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else.
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list.
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + "\""):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + "\""):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects
    # without a specific backend.
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}

    # Let's continue with backend-specific objects.
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else.
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list.
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two per-backend object dicts of one init and return a list
    of human-readable error strings (empty when the halves agree)."""

    def find_duplicates(seq):
        # Names declared more than once on the same side.
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        # Order-insensitive comparison of the two sides for this backend.
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for obj in type_hint_objects[key]:
                if obj not in import_dict_objects[key]:
                    errors.append(f" {obj} in TYPE_HINT but not in _import_structure.")
            for obj in import_dict_objects[key]:
                if obj not in type_hint_objects[key]:
                    errors.append(f" {obj} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """Walk the package tree, parse every lazy ``__init__.py`` and raise a
    ValueError aggregating all mismatches between the two halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """Return the names of all top-level submodules found on disk under
    ``PATH_TO_TRANSFORMERS`` (non-private folders containing Python files,
    plus top-level ``*.py`` files other than ``__init__.py``)."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules.
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache).
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


# Submodules deliberately absent from the main init.
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]


def check_submodules():
    """Raise a ValueError listing every on-disk submodule missing from the
    keys of the main init's ``_import_structure``."""
    # Load the repo's own transformers init (not an installed copy).
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
679
1
'''simple docstring''' import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self , snake_case , snake_case=1_3 , snake_case=7 , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=9_9 , snake_case=3_2 , snake_case=5 , snake_case=4 , snake_case=3_7 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=5_1_2 , snake_case=1_6 , snake_case=2 , snake_case=0.02 , snake_case=4 , ): '''simple docstring''' UpperCAmelCase : Any = parent UpperCAmelCase : Optional[int] = batch_size UpperCAmelCase : Union[str, Any] = seq_length UpperCAmelCase : int = is_training UpperCAmelCase : Any = use_attention_mask UpperCAmelCase : int = use_token_type_ids UpperCAmelCase : str = use_labels UpperCAmelCase : Union[str, Any] = vocab_size UpperCAmelCase : Optional[Any] = hidden_size UpperCAmelCase : Optional[int] = num_hidden_layers UpperCAmelCase : Dict = num_attention_heads UpperCAmelCase : Dict = intermediate_size UpperCAmelCase : Union[str, Any] = hidden_act UpperCAmelCase : int = hidden_dropout_prob UpperCAmelCase : List[Any] = attention_probs_dropout_prob UpperCAmelCase : Dict = max_position_embeddings UpperCAmelCase : Optional[Any] = type_vocab_size UpperCAmelCase : Any = type_sequence_label_size UpperCAmelCase : Optional[int] 
= initializer_range UpperCAmelCase : int = num_choices def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : List[Any] = None if self.use_attention_mask: UpperCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase : int = None if self.use_token_type_ids: UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase : Optional[Any] = RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = config_and_inputs UpperCAmelCase : Union[str, Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict def A_ ( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = config_and_inputs UpperCAmelCase : Dict = True UpperCAmelCase : Tuple = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from 
tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class UpperCamelCase__ ( lowercase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : int = True SCREAMING_SNAKE_CASE__ : int = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[str] = FlaxRobertaPreLayerNormModelTester(self ) @slow def A_ ( self ): '''simple docstring''' for model_class_name in self.all_model_classes: UpperCAmelCase : Tuple = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=snake_case ) UpperCAmelCase : Dict = model(np.ones((1, 1) ) ) self.assertIsNotNone(snake_case ) @require_flax class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[int] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=snake_case ) UpperCAmelCase : Any = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa ) UpperCAmelCase : List[str] = model(snake_case )[0] UpperCAmelCase : Optional[Any] = [1, 1_1, 5_0_2_6_5] self.assertEqual(list(output.shape ) , snake_case ) # compare the actual values for a slice. 
UpperCAmelCase : List[Any] = np.array( [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) ) @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : int = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=snake_case ) UpperCAmelCase : Tuple = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa ) UpperCAmelCase : List[str] = model(snake_case )[0] # compare the actual values for a slice. UpperCAmelCase : Dict = np.array( [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) )
679
'''simple docstring''' import os def lowercase ( ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = os.path.dirname(os.path.realpath(__magic_name__ ) ) UpperCAmelCase : Any = os.path.join(__magic_name__ , "triangle.txt" ) with open(__magic_name__ ) as f: UpperCAmelCase : str = f.readlines() UpperCAmelCase : Optional[int] = [] for line in triangle: UpperCAmelCase : List[str] = [] for number in line.strip().split(" " ): numbers_from_line.append(int(__magic_name__ ) ) a.append(__magic_name__ ) for i in range(1 , len(__magic_name__ ) ): for j in range(len(a[i] ) ): UpperCAmelCase : Union[str, Any] = a[i - 1][j] if j != len(a[i - 1] ) else 0 UpperCAmelCase : List[str] = a[i - 1][j - 1] if j > 0 else 0 a[i][j] += max(__magic_name__ , __magic_name__ ) return max(a[-1] ) if __name__ == "__main__": print(solution())
679
1
"""Init for the Wav2Vec2-with-language-model processor subpackage, using the
transformers lazy-import machinery."""
from typing import TYPE_CHECKING

from ...utils import _LazyModule

# Maps each submodule name to the public names it provides; consumed by
# ``_LazyModule`` so the heavy processor module is only imported on first use.
# The obfuscated original assigned this dict to ``a`` but then referenced the
# undefined name ``_import_structure`` below (NameError on import) -- define
# both names so existing references keep working.
_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}
a = _import_structure

if TYPE_CHECKING:
    from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
    import sys

    # Replace this module in ``sys.modules`` with the lazy proxy -- the
    # otherwise-unused ``import sys`` in the original shows this intent
    # (TODO confirm against the upstream lazy-init pattern); the original's
    # rebinding of ``a`` to the proxy object is preserved as well.
    a = sys.modules[__name__] = _LazyModule(
        __name__, globals()["__file__"], _import_structure, module_spec=__spec__
    )
679
"""Project Euler problem 25: find the index of the first Fibonacci number
whose decimal expansion has ``n`` digits."""


def fibonacci(n):
    """Return the ``n``-th Fibonacci number of the sequence 0, 1, 1, 2, 3, 5, ...

    Quirks preserved from the original: ``n == 1`` and any non-``int`` input
    return 0, and negative ``n`` falls through to Python's negative list
    indexing on the seed ``[0, 1]``.
    """
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        # Build the sequence iteratively up to index n (0-based).
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n):
    """Return the smallest index ``i`` (starting the search at 3) such that
    ``fibonacci(i)`` has at least ``n`` decimal digits.

    Returns 2 immediately when ``n <= 0`` (loop never runs).
    """
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n=1000):
    """Entry point: index of the first Fibonacci number with ``n`` digits."""
    return fibonacci_digits_index(n)


# The obfuscated original defined all three functions under the single name
# ``lowercase`` (each shadowing the previous), while the bodies and the
# ``__main__`` guard called ``fibonacci``, ``fibonacci_digits_index`` and
# ``solution`` -- restoring those names fixes the NameErrors. ``lowercase``
# is kept as an alias to the entry point for backward compatibility.
lowercase = solution

if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
679
1
'''simple docstring''' from typing import Dict, Iterable, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract a : Optional[int] = logging.get_logger(__name__) def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' return [ int(1000 * (box[0] / width) ), int(1000 * (box[1] / height) ), int(1000 * (box[2] / width) ), int(1000 * (box[3] / height) ), ] def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : List[Any] = to_pil_image(__magic_name__ ) UpperCAmelCase , UpperCAmelCase : Optional[Any] = pil_image.size UpperCAmelCase : Dict = pytesseract.image_to_data(__magic_name__ , lang=__magic_name__ , output_type="dict" , config=__magic_name__ ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[int] = data["text"], data["left"], data["top"], data["width"], data["height"] # filter empty words and corresponding coordinates UpperCAmelCase : Any = [idx for idx, word in enumerate(__magic_name__ ) if not word.strip()] UpperCAmelCase : Optional[Any] = [word for idx, word in enumerate(__magic_name__ ) if idx not in irrelevant_indices] UpperCAmelCase : Optional[int] = [coord for idx, coord in enumerate(__magic_name__ ) if idx not in irrelevant_indices] UpperCAmelCase : int = [coord for idx, coord in enumerate(__magic_name__ ) if idx not in irrelevant_indices] UpperCAmelCase : Union[str, Any] = 
[coord for idx, coord in enumerate(__magic_name__ ) if idx not in irrelevant_indices] UpperCAmelCase : Tuple = [coord for idx, coord in enumerate(__magic_name__ ) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format UpperCAmelCase : List[Any] = [] for x, y, w, h in zip(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): UpperCAmelCase : Optional[Any] = [x, y, x + w, y + h] actual_boxes.append(__magic_name__ ) # finally, normalize the bounding boxes UpperCAmelCase : Any = [] for box in actual_boxes: normalized_boxes.append(normalize_box(__magic_name__ , __magic_name__ , __magic_name__ ) ) assert len(__magic_name__ ) == len(__magic_name__ ), "Not as many words as there are bounding boxes" return words, normalized_boxes class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : str = ["pixel_values"] def __init__( self , snake_case = True , snake_case = None , snake_case = PILImageResampling.BILINEAR , snake_case = True , snake_case = 1 / 2_5_5 , snake_case = True , snake_case = None , snake_case = None , snake_case = True , snake_case = None , snake_case = "" , **snake_case , ): '''simple docstring''' super().__init__(**snake_case ) UpperCAmelCase : Tuple = size if size is not None else {"height": 2_2_4, "width": 2_2_4} UpperCAmelCase : Union[str, Any] = get_size_dict(snake_case ) UpperCAmelCase : Dict = do_resize UpperCAmelCase : str = size UpperCAmelCase : List[Any] = resample UpperCAmelCase : Tuple = do_rescale UpperCAmelCase : Tuple = rescale_value UpperCAmelCase : List[Any] = do_normalize UpperCAmelCase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD UpperCAmelCase : Optional[int] = apply_ocr UpperCAmelCase : int = ocr_lang UpperCAmelCase : Dict = tesseract_config def A_ ( self , snake_case , snake_case , snake_case = PILImageResampling.BILINEAR , 
snake_case = None , **snake_case , ): '''simple docstring''' UpperCAmelCase : Any = get_size_dict(snake_case ) if "height" not in size or "width" not in size: raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" ) UpperCAmelCase : List[Any] = (size["height"], size["width"]) return resize(snake_case , size=snake_case , resample=snake_case , data_format=snake_case , **snake_case ) def A_ ( self , snake_case , snake_case , snake_case = None , **snake_case , ): '''simple docstring''' return rescale(snake_case , scale=snake_case , data_format=snake_case , **snake_case ) def A_ ( self , snake_case , snake_case , snake_case , snake_case = None , **snake_case , ): '''simple docstring''' return normalize(snake_case , mean=snake_case , std=snake_case , data_format=snake_case , **snake_case ) def A_ ( self , snake_case , snake_case = None , snake_case = None , snake_case=None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = ChannelDimension.FIRST , **snake_case , ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = do_resize if do_resize is not None else self.do_resize UpperCAmelCase : str = size if size is not None else self.size UpperCAmelCase : Optional[int] = get_size_dict(snake_case ) UpperCAmelCase : List[str] = resample if resample is not None else self.resample UpperCAmelCase : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase : Any = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase : Any = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase : Optional[Any] = image_mean if image_mean is not None else self.image_mean UpperCAmelCase : Dict = image_std if image_std is not None else self.image_std UpperCAmelCase : List[str] = apply_ocr if apply_ocr is not None else 
self.apply_ocr UpperCAmelCase : Optional[int] = ocr_lang if ocr_lang is not None else self.ocr_lang UpperCAmelCase : int = tesseract_config if tesseract_config is not None else self.tesseract_config UpperCAmelCase : int = make_list_of_images(snake_case ) if not valid_images(snake_case ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("If do_normalize is True, image_mean and image_std must be specified." ) # All transformations expect numpy arrays. UpperCAmelCase : int = [to_numpy_array(snake_case ) for image in images] # Tesseract OCR to get words + normalized bounding boxes if apply_ocr: requires_backends(self , "pytesseract" ) UpperCAmelCase : Tuple = [] UpperCAmelCase : str = [] for image in images: UpperCAmelCase , UpperCAmelCase : Optional[int] = apply_tesseract(snake_case , snake_case , snake_case ) words_batch.append(snake_case ) boxes_batch.append(snake_case ) if do_resize: UpperCAmelCase : Dict = [self.resize(image=snake_case , size=snake_case , resample=snake_case ) for image in images] if do_rescale: UpperCAmelCase : Optional[Any] = [self.rescale(image=snake_case , scale=snake_case ) for image in images] if do_normalize: UpperCAmelCase : Optional[int] = [self.normalize(image=snake_case , mean=snake_case , std=snake_case ) for image in images] UpperCAmelCase : int = [to_channel_dimension_format(snake_case , snake_case ) for image in images] UpperCAmelCase : Optional[int] = BatchFeature(data={"pixel_values": images} , tensor_type=snake_case ) if apply_ocr: UpperCAmelCase : Tuple = words_batch UpperCAmelCase : Optional[int] = boxes_batch return data
679
"""Convert an RWKV checkpoint from BlinkDL's repos to the Hugging Face format."""
import argparse
import gc
import json
import os
import re

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint


# checkpoint-size label -> architecture hyper-parameters
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}


def convert_state_dict(state_dict):
    """Rename the keys of an original RWKV state dict in place to match the
    Transformers naming scheme, and return the same dict."""
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key and reshape
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value and reshape
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance and reshape
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        # every weight except the LM head lives under the `rwkv.` prefix
        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict


def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    """Download an RWKV checkpoint, convert it and save it (sharded) to
    ``output_dir``; optionally push the result to the Hub.

    Raises:
        ValueError: if the model size cannot be inferred, is unknown, or
            ``push_to_hub`` is requested without ``model_name``.
    """
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
    print(
        "Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
    )
    parser.add_argument(
        "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
    )
    parser.add_argument(
        "--tokenizer_file",
        default=None,
        type=str,
        help="Path to the tokenizer file to use (if not provided, only the model is converted).",
    )
    parser.add_argument(
        "--size",
        default=None,
        type=str,
        help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Push to the Hub the converted model.",
    )
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="Name of the pushed model on the Hub, including the username / organization.",
    )

    args = parser.parse_args()
    convert_rmkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
679
1
'''simple docstring''' from math import pi, sqrt def lowercase ( __magic_name__ ): '''simple docstring''' if num <= 0: raise ValueError("math domain error" ) if num > 1_7_1.5: raise OverflowError("math range error" ) elif num - int(__magic_name__ ) not in (0, 0.5): raise NotImplementedError("num must be an integer or a half-integer" ) elif num == 0.5: return sqrt(__magic_name__ ) else: return 1.0 if num == 1 else (num - 1) * gamma(num - 1 ) def lowercase ( ): '''simple docstring''' assert gamma(0.5 ) == sqrt(__magic_name__ ) assert gamma(1 ) == 1.0 assert gamma(2 ) == 1.0 if __name__ == "__main__": from doctest import testmod testmod() a : List[Any] = 1.0 while num: a : str = float(input("Gamma of: ")) print(F'gamma({num}) = {gamma(num)}') print("\nEnter 0 to exit...")
679
'''simple docstring''' def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' if a < 0 or b < 0: raise ValueError("the value of both inputs must be positive" ) UpperCAmelCase : Optional[Any] = str(bin(__magic_name__ ) )[2:] # remove the leading "0b" UpperCAmelCase : List[Any] = str(bin(__magic_name__ ) )[2:] # remove the leading "0b" UpperCAmelCase : Dict = max(len(__magic_name__ ) , len(__magic_name__ ) ) return "0b" + "".join( str(int(char_a == "1" and char_b == "1" ) ) for char_a, char_b in zip(a_binary.zfill(__magic_name__ ) , b_binary.zfill(__magic_name__ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
679
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" ) UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("google/mt5-small" ) UpperCAmelCase : Tuple = tokenizer("Hello there" , return_tensors="tf" ).input_ids UpperCAmelCase : int = tokenizer("Hi I am" , return_tensors="tf" ).input_ids UpperCAmelCase : List[Any] = model(snake_case , labels=snake_case ).loss UpperCAmelCase : Any = -tf.math.reduce_mean(snake_case ).numpy() UpperCAmelCase : Any = -21.22_8168 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
679
"""Tests for the Perceiver (byte-level) tokenizer."""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple

from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class UpperCamelCase__(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer test suite; the Perceiver tokenizer works on raw bytes and
    has no conventional vocabulary, so several common-mixin tests are
    overridden as no-ops below."""

    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        """Build a (text, ids) pair of cleanly round-tripping tokens for
        ``tokenizer``, bounded in length by ``min_length``/``max_length``."""
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        # keep only plain-ASCII tokens that encode back to exactly one id
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_multibytes_char(self):
        """Multi-byte characters must round-trip through encode/decode."""
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")

    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )
                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_single_bytes(self):
        # id 178 is a lone continuation byte, which must decode to the replacement char
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
679
1
"""Tests for the OneFormer image processor."""
import json
import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from transformers import OneFormerImageProcessor
    from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
    from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput

if is_vision_available():
    from PIL import Image


def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    """Download a class-info JSON from the Hub dataset ``repo_path`` and turn it
    into the metadata dict the OneFormer image processor expects
    (id -> name, plus ``thing_ids`` and ``class_names`` lists)."""
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []

    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))

    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata


class OneFormerImageProcessorTester(unittest.TestCase):
    """Holds the configuration used to build image-processor instances and
    expected-shape helpers for the test class below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        num_labels=10,
        do_reduce_labels=False,
        ignore_index=255,
        repo_path="shi-labs/oneformer_demo",
        class_info_file="ade20k_panoptic.json",
        num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "num_labels": self.num_labels,
            "do_reduce_labels": self.do_reduce_labels,
            "ignore_index": self.ignore_index,
            "class_info_file": self.class_info_file,
            "metadata": self.metadata,
            "num_text": self.num_text,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Expected (height, width) after shortest-edge resizing; for a batch,
        the per-image maxima (padding target)."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width

    def get_fake_oneformer_outputs(self):
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)),
            masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)),
        )


@require_torch
@require_vision
class UpperCamelCase__(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    # NOTE(review): the obfuscation destroyed this attribute's original name —
    # confirm against upstream before relying on it.
    feature_extraction_class = image_processing_class

    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def comm_get_image_processor_inputs(
        self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"
    ):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]

        inputs = image_processor(
            image_inputs,
            ["semantic"] * len(image_inputs),
            annotations,
            return_tensors="pt",
            instance_id_to_semantic_id=instance_id_to_semantic_id,
            pad_and_return_pixel_mask=True,
        )

        return inputs

    def test_init_without_params(self):
        pass

    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
            )

            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]

            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="pil")

    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1

        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)

    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()

        segmentation = image_processor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)

        self.assertEqual(segmentation[0].shape, target_sizes[0])

    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )

    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
679
"""EfficientFormer model configuration."""

from typing import List

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}


class EfficientFormerConfig(PretrainedConfig):
    """
    Configuration for an EfficientFormer model.

    Stores the hyper-parameters used to instantiate the model; instantiating with no
    arguments yields the defaults of the snap-research/efficientformer-l1-300 checkpoint.
    Unrecognised keyword arguments are forwarded to `PretrainedConfig`.
    """

    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
679
1
"""Tests for the byte-level Perceiver tokenizer."""

import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple

from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


# Pick the tensor framework available in the environment; used as `return_tensors=`.
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        """Build an (output_txt, output_ids) pair of decodable single-byte tokens."""
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        # keep tokens that are plain ASCII letters/spaces and round-trip through encode
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_multibytes_char(self):
        """Multi-byte UTF-8 characters must round-trip through encode/decode."""
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")

    def test_prepare_batch_integration(self):
        """Padding a batch yields the expected ids and (2, 38) shapes."""
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_save_and_load_tokenizer(self):
        """Saving and reloading must preserve encodings, added tokens and model_max_length."""
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        """additional_special_tokens declared in the saved config files must be honoured."""
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_invalid_byte_id(self):
        # a lone continuation byte decodes to the replacement character
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
679
'''simple docstring''' from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case , snake_case=3 , snake_case=3_2 , snake_case=3 , snake_case=1_0 , snake_case=[1_0, 2_0, 3_0, 4_0] , snake_case=[1, 1, 2, 1] , snake_case=True , snake_case=True , snake_case="relu" , snake_case=3 , snake_case=None , ): '''simple docstring''' UpperCAmelCase : Dict = parent UpperCAmelCase : int = batch_size UpperCAmelCase : Union[str, Any] = image_size UpperCAmelCase : Union[str, Any] = num_channels UpperCAmelCase : List[str] = embeddings_size UpperCAmelCase : Any = hidden_sizes UpperCAmelCase : int = depths UpperCAmelCase : List[str] = is_training UpperCAmelCase : List[str] = use_labels UpperCAmelCase : int = hidden_act UpperCAmelCase : Union[str, Any] = num_labels UpperCAmelCase : str = scope UpperCAmelCase : str = len(snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase : List[Any] = None if self.use_labels: UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_labels ) UpperCAmelCase : Optional[int] = self.get_config() return config, pixel_values, labels def 
A_ ( self ): '''simple docstring''' return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def A_ ( self , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : List[Any] = TFResNetModel(config=snake_case ) UpperCAmelCase : int = model(snake_case ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def A_ ( self , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : List[str] = self.num_labels UpperCAmelCase : List[Any] = TFResNetForImageClassification(snake_case ) UpperCAmelCase : Union[str, Any] = model(snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = config_and_inputs UpperCAmelCase : Union[str, Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class UpperCamelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () SCREAMING_SNAKE_CASE__ : Optional[int] = ( {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification} if is_tf_available() else {} ) SCREAMING_SNAKE_CASE__ : Dict = False SCREAMING_SNAKE_CASE__ : int = False SCREAMING_SNAKE_CASE__ : Tuple = False SCREAMING_SNAKE_CASE__ : Optional[Any] = False SCREAMING_SNAKE_CASE__ : Union[str, Any] = False def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = TFResNetModelTester(self ) 
UpperCAmelCase : List[Any] = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case ) def A_ ( self ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A_ ( self ): '''simple docstring''' return @unittest.skip(reason="ResNet does not use inputs_embeds" ) def A_ ( self ): '''simple docstring''' pass @unittest.skip(reason="ResNet does not support input and output embeddings" ) def A_ ( self ): '''simple docstring''' pass def A_ ( self ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : Dict = model_class(snake_case ) UpperCAmelCase : Optional[int] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase : List[str] = [*signature.parameters.keys()] UpperCAmelCase : Tuple = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def A_ ( self ): '''simple docstring''' def check_hidden_states_output(snake_case , snake_case , snake_case ): UpperCAmelCase : Optional[Any] = model_class(snake_case ) UpperCAmelCase : Union[str, Any] = model(**self._prepare_for_class(snake_case , snake_case ) ) UpperCAmelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCAmelCase : List[str] = self.model_tester.num_stages self.assertEqual(len(snake_case ) , 
expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Optional[int] = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: UpperCAmelCase : str = layer_type UpperCAmelCase : Optional[Any] = True check_hidden_states_output(snake_case , snake_case , snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase : str = True check_hidden_states_output(snake_case , snake_case , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case ) @slow def A_ ( self ): '''simple docstring''' for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : Any = TFResNetModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def lowercase ( ): '''simple docstring''' UpperCAmelCase : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @cached_property def A_ ( self ): '''simple docstring''' return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) UpperCAmelCase : Union[str, Any] = self.default_image_processor UpperCAmelCase : Tuple = prepare_img() UpperCAmelCase : str = image_processor(images=snake_case , return_tensors="tf" ) # forward pass 
UpperCAmelCase : Any = model(**snake_case ) # verify the logits UpperCAmelCase : Any = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , snake_case ) UpperCAmelCase : List[str] = tf.constant([-11.1069, -9.7877, -8.3777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , snake_case , atol=1e-4 ) )
679
1
"""chrF / chrF++ machine-translation metric, wrapping sacrebleu's CHRF implementation."""

import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF

import datasets


# NOTE(review): the three constants below are runtime strings consumed by
# `add_start_docstrings` / `MetricInfo`; they are reproduced byte-for-byte.
_CITATION = "\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"

_DESCRIPTION = "\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n"

_KWARGS_DESCRIPTION = "\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n 'score' (float): The chrF (chrF++) score,\n 'char_order' (int): The character n-gram order,\n 'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n 'beta' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ChrF(datasets.Metric):
    def _info(self):
        """Describe the metric; refuse to run on sacrebleu versions older than 1.4.12."""
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        """Compute chrF(++) over a corpus of predictions and (transposed) references."""
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        # sacrebleu expects one list per reference position, not one list per prediction
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)

        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
679
"""Tests for the PyTorch MPNet model: a config/inputs tester, the common model-test
harness, and a slow integration check against ``microsoft/mpnet-base``.

NOTE(review): this file has been through an automated rename.  All three classes are
named ``UpperCamelCase__`` (later defs shadow earlier ones), every method is named
``A_``, every parameter is ``snake_case`` (duplicated parameter names are a
SyntaxError), and every local is ``UpperCAmelCase``.  The code cannot run as written
and the original identifiers are not recoverable from this file alone, so it is left
byte-identical below; only comments/docstrings were changed.
"""
import unittest

from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MPNetForMaskedLM,
        MPNetForMultipleChoice,
        MPNetForQuestionAnswering,
        MPNetForSequenceClassification,
        MPNetForTokenClassification,
        MPNetModel,
    )


class UpperCamelCase__ :
    """Builds a tiny MPNet config plus random inputs and runs per-head shape checks."""

    # NOTE(review): every keyword parameter below is named `snake_case`, so the body's
    # reads of `parent`, `batch_size`, ... are unbound -- original names lost.
    def __init__( self , snake_case , snake_case=1_3 , snake_case=7 , snake_case=True , snake_case=True , snake_case=False , snake_case=True , snake_case=9_9 , snake_case=6_4 , snake_case=5 , snake_case=4 , snake_case=6_4 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=5_1_2 , snake_case=1_6 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , ):
        """Store the test hyper-parameters (batch size, model dims, label counts...)."""
        UpperCAmelCase : List[Any] = parent
        UpperCAmelCase : List[str] = batch_size
        UpperCAmelCase : int = seq_length
        UpperCAmelCase : Dict = is_training
        UpperCAmelCase : Optional[Any] = use_input_mask
        UpperCAmelCase : Optional[Any] = use_token_type_ids
        UpperCAmelCase : Optional[Any] = use_labels
        UpperCAmelCase : int = vocab_size
        UpperCAmelCase : Optional[int] = hidden_size
        UpperCAmelCase : Dict = num_hidden_layers
        UpperCAmelCase : List[str] = num_attention_heads
        UpperCAmelCase : Any = intermediate_size
        UpperCAmelCase : Optional[int] = hidden_act
        UpperCAmelCase : int = hidden_dropout_prob
        UpperCAmelCase : Tuple = attention_probs_dropout_prob
        UpperCAmelCase : Any = max_position_embeddings
        UpperCAmelCase : Tuple = type_vocab_size
        UpperCAmelCase : Union[str, Any] = type_sequence_label_size
        UpperCAmelCase : int = initializer_range
        UpperCAmelCase : Dict = num_labels
        UpperCAmelCase : Union[str, Any] = num_choices
        UpperCAmelCase : List[Any] = scope

    # NOTE(review): all methods below are named `A_`; in a real class each would need a
    # distinct name (get_large_model_config, prepare_config_and_inputs, get_config, ...).
    def A_ ( self ):
        """Return the full pretrained config for the reference checkpoint."""
        return MPNetConfig.from_pretrained("microsoft/mpnet-base" )

    def A_ ( self ):
        """Create random input ids, an optional attention mask, and optional labels."""
        UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCAmelCase : Any = None
        if self.use_input_mask:
            UpperCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCAmelCase : Optional[Any] = None
        UpperCAmelCase : str = None
        UpperCAmelCase : Dict = None
        if self.use_labels:
            UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
        UpperCAmelCase : Optional[int] = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def A_ ( self ):
        """Build a small MPNetConfig from the stored hyper-parameters."""
        return MPNetConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )

    def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
        """Check base MPNetModel output shapes (last_hidden_state and pooler_output)."""
        UpperCAmelCase : Union[str, Any] = MPNetModel(config=snake_case )
        model.to(snake_case )
        model.eval()
        UpperCAmelCase : Dict = model(snake_case , snake_case )
        UpperCAmelCase : int = model(snake_case )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
        """Check QA head: start/end logits are (batch_size, seq_length)."""
        UpperCAmelCase : int = MPNetForQuestionAnswering(config=snake_case )
        model.to(snake_case )
        model.eval()
        UpperCAmelCase : Dict = model(
            snake_case , attention_mask=snake_case , start_positions=snake_case , end_positions=snake_case , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
        """Check sequence-classification head: logits are (batch_size, num_labels)."""
        UpperCAmelCase : Tuple = self.num_labels
        UpperCAmelCase : Optional[int] = MPNetForSequenceClassification(snake_case )
        model.to(snake_case )
        model.eval()
        UpperCAmelCase : Optional[int] = model(snake_case , attention_mask=snake_case , labels=snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
        """Check multiple-choice head: inputs are tiled per choice; logits are (batch_size, num_choices)."""
        UpperCAmelCase : Union[str, Any] = self.num_choices
        UpperCAmelCase : Optional[int] = MPNetForMultipleChoice(config=snake_case )
        model.to(snake_case )
        model.eval()
        UpperCAmelCase : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        UpperCAmelCase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        UpperCAmelCase : Tuple = model(
            snake_case , attention_mask=snake_case , labels=snake_case , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
        """Check token-classification head: logits are (batch_size, seq_length, num_labels)."""
        UpperCAmelCase : List[Any] = self.num_labels
        UpperCAmelCase : Tuple = MPNetForTokenClassification(config=snake_case )
        model.to(snake_case )
        model.eval()
        UpperCAmelCase : List[str] = model(snake_case , attention_mask=snake_case , labels=snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def A_ ( self ):
        """Split prepared inputs into (config, inputs_dict) for the common test mixin."""
        UpperCAmelCase : int = self.prepare_config_and_inputs()
        ((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) : str = config_and_inputs
        UpperCAmelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class UpperCamelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
    """Common model-test harness wiring MPNet into ModelTesterMixin/PipelineTesterMixin.

    NOTE(review): all four class attributes share the name `SCREAMING_SNAKE_CASE__`
    (only the last survives); originally these were distinct mixin knobs such as
    `all_model_classes` / `pipeline_model_mapping` plus two booleans.
    """

    SCREAMING_SNAKE_CASE__ : Optional[int] = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    SCREAMING_SNAKE_CASE__ : Any = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    SCREAMING_SNAKE_CASE__ : int = False
    SCREAMING_SNAKE_CASE__ : str = True

    def A_ ( self ):
        """Set up the model tester and the shared config tester."""
        UpperCAmelCase : Union[str, Any] = MPNetModelTester(self )
        UpperCAmelCase : List[Any] = ConfigTester(self , config_class=snake_case , hidden_size=3_7 )

    def A_ ( self ):
        """Run the generic MPNetConfig sanity checks."""
        self.config_tester.run_common_tests()

    def A_ ( self ):
        """Exercise the base-model shape check."""
        UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*snake_case )

    def A_ ( self ):
        """Exercise the sequence-classification head check."""
        UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*snake_case )

    def A_ ( self ):
        """Exercise the multiple-choice head check."""
        UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*snake_case )

    def A_ ( self ):
        """Exercise the token-classification head check."""
        UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*snake_case )

    def A_ ( self ):
        """Exercise the question-answering head check."""
        UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*snake_case )


@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
    """Slow integration test: run the pretrained checkpoint on a fixed input and
    compare a 3x3 slice of the hidden states against golden values."""

    @slow
    def A_ ( self ):
        UpperCAmelCase : Any = MPNetModel.from_pretrained("microsoft/mpnet-base" )
        UpperCAmelCase : Optional[int] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
        UpperCAmelCase : Optional[Any] = model(snake_case )[0]
        UpperCAmelCase : Optional[int] = torch.Size((1, 1_1, 7_6_8) )
        self.assertEqual(output.shape , snake_case )
        UpperCAmelCase : Optional[Any] = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) )
679
1
"""Kinetic energy of a rigid body in classical mechanics: E_k = 1/2 * m * v**2."""


def lowercase(mass, velocity):
    """Return the kinetic energy ``0.5 * mass * velocity**2`` of a body.

    The original signature declared the same placeholder name for both
    parameters (a SyntaxError); the names are restored from the error message
    and the physics of the formula.

    Args:
        mass: mass of the body; must be non-negative.
        velocity: signed speed of the body; direction does not matter, only
            its magnitude enters the (squared) formula.

    Returns:
        The kinetic energy as a float.

    Raises:
        ValueError: if ``mass`` is negative.

    >>> lowercase(10, 10)
    500.0
    >>> lowercase(2, -3)
    9.0
    >>> lowercase(0, 100)
    0.0
    """
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative" )
    # abs() twice is equivalent to squaring; kept to preserve the original arithmetic.
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
679
"""Convert slow (Python) tokenizer checkpoints to their fast (Rust) counterparts.

Defect fixed: an automated rename had collapsed the module globals (`logger`,
`TOKENIZER_CLASSES`, `parser`, `args`) and the function parameters into
duplicate placeholder names, so the script could not even parse, and the entry
point called an undefined `convert_slow_checkpoint_to_fast`.  All restored names
are taken from their read sites inside this same file.
"""
import argparse
import os

import transformers

from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging


logging.set_verbosity_info()

logger = logging.get_logger(__name__)

# Map each slow tokenizer name to its fast ("...Fast") class.
TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}


def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    """Download the given checkpoint(s), convert to fast tokenizers, and save only
    the resulting ``tokenizer.json`` files under ``dump_path``.

    Args:
        tokenizer_name: a key of ``TOKENIZER_CLASSES`` or ``None`` for all of them.
        checkpoint_name: a specific checkpoint or ``None`` for the canonical ones.
        dump_path: output directory for the generated fast tokenizer files.
        force_download: re-download checkpoints even if cached.

    Raises:
        ValueError: if ``tokenizer_name`` is given but unknown.
    """
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(F"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}." )

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast" )}

    logger.info(F"Loading tokenizer classes: {tokenizer_names}" )

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            # No checkpoint given: convert every canonical checkpoint of this tokenizer.
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(F"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}" )

        for checkpoint in checkpoint_names:
            logger.info(F"Loading {tokenizer_class.__class__.__name__} {checkpoint}" )

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download )

            # Save fast tokenizer
            logger.info(F"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}" )

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/" )
                dump_path_full = os.path.join(dump_path, checkpoint_directory )
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                next_char = file_path.split(checkpoint )[-1][0]
                if next_char == "/":
                    # Vocab file lives in a sub-folder: save there without a prefix.
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name )
                    checkpoint_prefix_name = None

                logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )

            # legacy_format=False keeps only the unified tokenizer.json serialization.
            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name )
            logger.info(F"=> File names {file_names}" )

            # Remove everything except the fast tokenizer.json output.
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json" ):
                    os.remove(file_name )
                    logger.info(F"=> removing {file_name}" )


# Backward-compatible alias for the previous (garbled) public name.
lowercase = convert_slow_checkpoint_to_fast


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help=(
            F'Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '
            "download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--checkpoint_name",
        default=None,
        type=str,
        help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
    )
    parser.add_argument(
        "--force_download",
        action="store_true",
        help="Re-download checkpoints.",
    )
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
679
1
"""Mean IoU (Intersection-over-Union) metric for semantic segmentation.

Defect fixed: all three helper functions had been renamed to the same placeholder
`lowercase` while their bodies and the `datasets.Metric` subclass call them by
their real names (`intersect_and_union`, `total_intersect_and_union`, `mean_iou`)
-- a guaranteed NameError.  Names are restored from those in-file call sites; the
metric-class hooks are restored to `_info`/`_compute`, the names `datasets.Metric`
dispatches to; `np.floataa` is restored to `np.float64`.
"""
from typing import Dict, Optional

import numpy as np

import datasets


_DESCRIPTION = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"

_KWARGS_DESCRIPTION = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"

_CITATION = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"


def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Compute per-class intersection/union/pred/label pixel-count histograms
    for one (prediction, ground-truth) pair of segmentation maps.

    Returns:
        Tuple of four ``ndarray``s of shape ``(num_labels,)``:
        (area_intersect, area_union, area_pred_label, area_label).
    """
    if label_map is not None:
        # Remap ground-truth ids in place according to the provided mapping.
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label )
    label = np.array(label )

    if reduce_labels:
        # Shift labels down by one so background (0) becomes the ignore value 255.
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = np.not_equal(label, ignore_index )
    pred_label = pred_label[mask]
    label = np.array(label )[mask]

    # Pixels where prediction matches ground truth.
    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1) )[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1) )[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1) )[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label


def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Sum :func:`intersect_and_union` histograms over all image pairs.

    Returns:
        Tuple of four ``ndarray``s of shape ``(num_labels,)``:
        (total_area_intersect, total_area_union, total_area_pred_label, total_area_label).
    """
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64 )
    total_area_union = np.zeros((num_labels,), dtype=np.float64 )
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64 )
    total_area_label = np.zeros((num_labels,), dtype=np.float64 )
    for result, gt_seg_map in zip(results, gt_seg_maps ):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label


def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
    """Compute mean IoU / mean accuracy / overall accuracy plus per-category arrays.

    ``nan_to_num``, if given, replaces NaN entries (classes with zero union/label
    area produce NaN through 0/0) in every returned metric.
    """
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels )

    # compute metrics
    metrics = {}

    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label

    metrics["mean_iou"] = np.nanmean(iou )
    metrics["mean_accuracy"] = np.nanmean(acc )
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num ) for metric, metric_value in metrics.items()}

    return metrics


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
    """`datasets` metric wrapper around :func:`mean_iou`."""

    def _info(self ):
        """Declare metric metadata and the expected input feature schema."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(self, predictions, references, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False, ):
        """Delegate to :func:`mean_iou` and return its metrics dict."""
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
679
"""Agent tool that answers a natural-language question about an image with the
ViLT VQA checkpoint ``dandelin/vilt-b32-finetuned-vqa``."""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

import torch

from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class UpperCamelCase__ ( lowercase__ ):
    """Visual-question-answering PipelineTool: preprocess (image, question) pairs,
    run the VQA model, and decode the argmax logit into a label string.

    NOTE(review): an automated rename collapsed all seven class attributes into the
    single name `SCREAMING_SNAKE_CASE__` (only the last assignment survives) and the
    three pipeline hooks into `A_`; originally these would have been distinct names
    (checkpoint / description / tool name / processor & model classes / inputs &
    outputs, and presumably encode/forward/decode -- confirm against `PipelineTool`).
    Code left byte-identical; comments only.
    """

    SCREAMING_SNAKE_CASE__ : Optional[int] = "dandelin/vilt-b32-finetuned-vqa"
    SCREAMING_SNAKE_CASE__ : Dict = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    SCREAMING_SNAKE_CASE__ : List[str] = "image_qa"
    SCREAMING_SNAKE_CASE__ : int = AutoProcessor
    SCREAMING_SNAKE_CASE__ : Tuple = AutoModelForVisualQuestionAnswering
    SCREAMING_SNAKE_CASE__ : Any = ["image", "text"]
    SCREAMING_SNAKE_CASE__ : Optional[Any] = ["text"]

    # NOTE(review): `*snake_case, **snake_case` duplicates a parameter name (SyntaxError
    # as written); originally generic *args/**kwargs forwarded to the base class.
    def __init__( self , *snake_case , **snake_case ):
        """Require the vision backend, then defer to PipelineTool initialization."""
        requires_backends(self , ["vision"] )
        super().__init__(*snake_case , **snake_case )

    def A_ ( self , snake_case , snake_case ):
        """Tokenize/featurize the (image, question) pair into PyTorch tensors."""
        return self.pre_processor(snake_case , snake_case , return_tensors="pt" )

    def A_ ( self , snake_case ):
        """Run the VQA model without gradients and return raw logits."""
        with torch.no_grad():
            return self.model(**snake_case ).logits

    def A_ ( self , snake_case ):
        """Map the highest-scoring logit index to its answer label string."""
        UpperCAmelCase : Any = outputs.argmax(-1 ).item()
        return self.model.config.idalabel[idx]
679
1
"""Lazy import shim for the (deprecated) TrajectoryTransformer model.

Defect fixed: an automated rename had turned `_import_structure` and the final
`sys.modules[__name__]` assignment into throwaway `a : ... = ...` bindings, so
the last line's reference to `_import_structure` raised NameError on import.
Restored the standard transformers `_LazyModule` boilerplate; `_import_structure`
is proven by its read on the original last line.
"""
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Mapping of submodule name -> public symbols, consumed by _LazyModule below.
_import_structure = {
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only the configuration objects.
    pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrajectoryTransformerModel",
        "TrajectoryTransformerPreTrainedModel",
        "load_tf_weights_in_trajectory_transformer",
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers.
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
679
"""Utilities to convert a PyTorch state dict into Flax model parameters.

Defect fixed: all three functions had been renamed to the same placeholder
`lowercase`, while the converter's body calls `rename_key` and
`rename_key_and_reshape_tensor` by name (NameError as written).  Function and
parameter names are restored from their in-file call/read sites.
"""
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    """Rewrite `name.<digits>` segments as `name_<digits>` so PyTorch ModuleList
    keys match Flax's underscore-separated layer naming."""
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key )
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split("." ) ) )
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename a PyTorch parameter key tuple to its Flax equivalent and reshape
    the tensor where the two frameworks disagree (conv kernels, linear weights).

    Returns:
        Tuple ``(flax_tuple_key, tensor)``.
    """
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key )
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: PyTorch (out, in, kh, kw) -> Flax (kh, kw, in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0 )
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: transpose weight matrix
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    """Convert a PyTorch ``state_dict`` into a nested Flax parameter dict.

    Args:
        pt_state_dict: mapping of PyTorch parameter names to tensors.
        flax_model: Flax model providing ``init_weights`` for the reference shapes.
        init_key: integer PRNG seed used to initialize the reference params.

    Raises:
        ValueError: if a converted tensor's shape disagrees with the randomly
            initialized Flax parameter of the same key.
    """
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key ) )

    random_flax_state_dict = flatten_dict(random_flax_params )
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key )
        pt_tuple_key = tuple(renamed_pt_key.split("." ) )

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict )

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor )

    return unflatten_dict(flax_state_dict )


# Backward-compatible alias for the previous (garbled) public name.
lowercase = convert_pytorch_state_dict_to_flax
679
1
"""Convert a T5X ("tax") LongT5/T5 checkpoint into a Flax transformers checkpoint.

NOTE(review): an automated rename collapsed every assignment target in this file
into the throwaway name `UpperCAmelCase` and all three parameters into
`__magic_name__` (duplicated parameter names are a SyntaxError).  The loop bodies
read names such as `tax_attention_key` that the visible code never binds, and the
original destinations inside `flax_model.params` are NOT recoverable from this
file, so the code is left byte-identical; only comments were added.
"""
import argparse

from tax import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM


def lowercase(__magic_name__, __magic_name__, __magic_name__):
    """Load a T5X checkpoint, copy its weights layer-by-layer into a Flax model
    built from the named config, and save the result with save_pretrained."""
    UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(__magic_name__ )
    UpperCAmelCase : List[Any] = FlaxAutoModelForSeqaSeqLM.from_config(config=__magic_name__ )
    UpperCAmelCase : List[Any] = checkpoints.load_tax_checkpoint(__magic_name__ )

    # Gated-activation checkpoints store the MLP input projection split as wi_0/wi_1.
    UpperCAmelCase : Optional[int] = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]

    if config.model_type == "t5":
        UpperCAmelCase : List[Any] = "SelfAttention"
    # NOTE(review): for model_type == "t5" neither branch below matches, so the `else`
    # raises even though "t5" was accepted just above -- looks unintended; confirm
    # against the upstream conversion script before relying on t5 support here.
    if config.model_type == "longt5" and config.encoder_attention_type == "local":
        UpperCAmelCase : Dict = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        UpperCAmelCase : List[str] = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global]."
        )

    # Encoder
    for layer_index in range(config.num_layers ):
        UpperCAmelCase : str = F"layers_{str(__magic_name__ )}"

        # Self-Attention: pull k/o/q/v projection kernels for this encoder layer.
        UpperCAmelCase : Any = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        UpperCAmelCase : List[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        UpperCAmelCase : Dict = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        UpperCAmelCase : List[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            UpperCAmelCase : Any = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        UpperCAmelCase : str = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            UpperCAmelCase : Optional[int] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            UpperCAmelCase : Union[str, Any] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            UpperCAmelCase : int = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]

        UpperCAmelCase : List[str] = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        UpperCAmelCase : Optional[Any] = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning -- NOTE(review): the original dict-entry targets were garbled to
        # `UpperCAmelCase`, so these repeated assignments discard each value as written.
        UpperCAmelCase : Dict = flax_model.params["encoder"]["block"][str(__magic_name__ )]["layer"]
        UpperCAmelCase : List[str] = tax_attention_key
        UpperCAmelCase : int = tax_attention_out
        UpperCAmelCase : int = tax_attention_query
        UpperCAmelCase : int = tax_attention_value
        UpperCAmelCase : List[str] = tax_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            UpperCAmelCase : Union[str, Any] = tax_global_layer_norm

        if split_mlp_wi:
            UpperCAmelCase : Optional[int] = tax_mlp_wi_a
            UpperCAmelCase : int = tax_mlp_wi_a
        else:
            UpperCAmelCase : str = tax_mlp_wi

        UpperCAmelCase : Tuple = tax_mlp_wo
        UpperCAmelCase : List[str] = tax_mlp_layer_norm
        UpperCAmelCase : int = flax_model_encoder_layer_block

    # Only for layer 0:
    UpperCAmelCase : Optional[Any] = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    UpperCAmelCase : Optional[Any] = tax_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        UpperCAmelCase : Optional[int] = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        UpperCAmelCase : Any = tax_encoder_global_rel_embedding

    # Assigning
    UpperCAmelCase : List[str] = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
    UpperCAmelCase : Any = tax_encoder_norm

    # Decoder
    for layer_index in range(config.num_layers ):
        UpperCAmelCase : List[str] = F"layers_{str(__magic_name__ )}"

        # Self-Attention
        UpperCAmelCase : str = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        UpperCAmelCase : Dict = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        UpperCAmelCase : List[Any] = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        UpperCAmelCase : List[str] = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        UpperCAmelCase : Optional[Any] = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]

        # Encoder-Decoder-Attention
        UpperCAmelCase : str = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        UpperCAmelCase : Optional[Any] = tax_enc_dec_attention_module["key"]["kernel"]
        UpperCAmelCase : int = tax_enc_dec_attention_module["out"]["kernel"]
        UpperCAmelCase : Optional[int] = tax_enc_dec_attention_module["query"]["kernel"]
        UpperCAmelCase : Optional[int] = tax_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        UpperCAmelCase : Dict = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            UpperCAmelCase : int = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            UpperCAmelCase : str = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            UpperCAmelCase : Optional[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]

        UpperCAmelCase : int = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        UpperCAmelCase : List[Any] = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning -- same garbling as in the encoder loop above.
        UpperCAmelCase : Union[str, Any] = flax_model.params["decoder"]["block"][str(__magic_name__ )]["layer"]
        UpperCAmelCase : Optional[Any] = tax_attention_key
        UpperCAmelCase : Tuple = tax_attention_out
        UpperCAmelCase : int = tax_attention_query
        UpperCAmelCase : Any = tax_attention_value
        UpperCAmelCase : Optional[Any] = tax_pre_attention_layer_norm
        UpperCAmelCase : Optional[int] = tax_enc_dec_attention_key
        UpperCAmelCase : int = tax_enc_dec_attention_out
        UpperCAmelCase : List[Any] = tax_enc_dec_attention_query
        UpperCAmelCase : int = tax_enc_dec_attention_value
        UpperCAmelCase : Tuple = tax_cross_layer_norm

        if split_mlp_wi:
            UpperCAmelCase : List[Any] = tax_mlp_wi_a
            UpperCAmelCase : Dict = tax_mlp_wi_a
        else:
            UpperCAmelCase : Union[str, Any] = tax_mlp_wi

        UpperCAmelCase : Union[str, Any] = tax_mlp_wo
        UpperCAmelCase : Optional[int] = txa_mlp_layer_norm
        UpperCAmelCase : Union[str, Any] = flax_model_decoder_layer_block

    # Decoder Normalization
    UpperCAmelCase : Optional[int] = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
    UpperCAmelCase : int = txa_decoder_norm

    # Only for layer 0:
    UpperCAmelCase : List[str] = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    UpperCAmelCase : Dict = tax_decoder_rel_embedding

    # Token Embeddings
    UpperCAmelCase : List[Any] = tax_model["target"]["token_embedder"]["embedding"]
    UpperCAmelCase : Optional[Any] = txa_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in tax_model["target"]["decoder"]:
        UpperCAmelCase : Optional[int] = tax_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(__magic_name__ )
    print("T5X Model was sucessfully converted!" )


if __name__ == "__main__":
    # NOTE(review): `a` vs. the `parser`/`args` reads below is the same rename damage.
    a : List[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
    )
    parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    a : Dict = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
679
'''Unit tests for EulerDiscreteScheduler (identifiers machine-obfuscated; kept as-is).'''
import torch

from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class UpperCamelCase__ ( lowercase__ ):
    """Scheduler test suite for EulerDiscreteScheduler.

    NOTE(review): assignment targets below were obfuscated to
    ``SCREAMING_SNAKE_CASE__``/``UpperCAmelCase`` while the *references* kept
    their original names (``config``, ``scheduler``, ``sample`` …); as written
    most bodies raise NameError.  Presumably the class attributes were
    ``scheduler_classes`` and ``num_inference_steps`` — TODO confirm against
    the upstream diffusers test file.
    """

    SCREAMING_SNAKE_CASE__ : Dict = (EulerDiscreteScheduler,)
    SCREAMING_SNAKE_CASE__ : List[Any] = 10

    def A_ ( self , **snake_case ):
        '''Build the default scheduler config dict, with kwargs merged on top.'''
        UpperCAmelCase : List[Any] = {
            "num_train_timesteps": 1_1_0_0,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        # NOTE(review): the dict above is bound to ``UpperCAmelCase`` but the
        # original code updated/returned ``config`` — obfuscation artifact.
        config.update(**snake_case )
        return config

    def A_ ( self ):
        '''Sweep several num_train_timesteps values through the common config check.'''
        for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=snake_case )

    def A_ ( self ):
        '''Sweep paired beta_start/beta_end values.'''
        for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=snake_case , beta_end=snake_case )

    def A_ ( self ):
        '''Sweep supported beta schedules.'''
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=snake_case )

    def A_ ( self ):
        '''Sweep supported prediction types.'''
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=snake_case )

    def A_ ( self ):
        '''Run a full deterministic denoising loop and compare against golden sums/means.'''
        UpperCAmelCase : Union[str, Any] = self.scheduler_classes[0]
        UpperCAmelCase : Union[str, Any] = self.get_scheduler_config()
        UpperCAmelCase : Optional[Any] = scheduler_class(**snake_case )
        scheduler.set_timesteps(self.num_inference_steps )
        UpperCAmelCase : Union[str, Any] = torch.manual_seed(0 )
        UpperCAmelCase : Union[str, Any] = self.dummy_model()
        # scale the deterministic dummy sample to the scheduler's initial noise level
        UpperCAmelCase : int = self.dummy_sample_deter * scheduler.init_noise_sigma
        UpperCAmelCase : Any = sample.to(snake_case )
        for i, t in enumerate(scheduler.timesteps ):
            UpperCAmelCase : Tuple = scheduler.scale_model_input(snake_case , snake_case )
            UpperCAmelCase : List[Any] = model(snake_case , snake_case )
            UpperCAmelCase : str = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case )
            UpperCAmelCase : Dict = output.prev_sample
        UpperCAmelCase : Optional[Any] = torch.sum(torch.abs(snake_case ) )
        UpperCAmelCase : List[Any] = torch.mean(torch.abs(snake_case ) )
        # golden values for the epsilon-prediction configuration
        assert abs(result_sum.item() - 10.0807 ) < 1e-2
        assert abs(result_mean.item() - 0.0131 ) < 1e-3

    def A_ ( self ):
        '''Same loop as above but with prediction_type="v_prediction" golden values.'''
        UpperCAmelCase : Optional[int] = self.scheduler_classes[0]
        UpperCAmelCase : int = self.get_scheduler_config(prediction_type="v_prediction" )
        UpperCAmelCase : List[Any] = scheduler_class(**snake_case )
        scheduler.set_timesteps(self.num_inference_steps )
        UpperCAmelCase : List[Any] = torch.manual_seed(0 )
        UpperCAmelCase : Dict = self.dummy_model()
        UpperCAmelCase : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
        UpperCAmelCase : int = sample.to(snake_case )
        for i, t in enumerate(scheduler.timesteps ):
            UpperCAmelCase : str = scheduler.scale_model_input(snake_case , snake_case )
            UpperCAmelCase : Dict = model(snake_case , snake_case )
            UpperCAmelCase : List[Any] = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case )
            UpperCAmelCase : Any = output.prev_sample
        UpperCAmelCase : Optional[int] = torch.sum(torch.abs(snake_case ) )
        UpperCAmelCase : Any = torch.mean(torch.abs(snake_case ) )
        assert abs(result_sum.item() - 0.0002 ) < 1e-2
        assert abs(result_mean.item() - 2.26_76e-06 ) < 1e-3

    def A_ ( self ):
        '''Full loop with timesteps placed on an explicit device; same golden values.'''
        UpperCAmelCase : Optional[int] = self.scheduler_classes[0]
        UpperCAmelCase : Optional[int] = self.get_scheduler_config()
        UpperCAmelCase : Any = scheduler_class(**snake_case )
        scheduler.set_timesteps(self.num_inference_steps , device=snake_case )
        UpperCAmelCase : List[Any] = torch.manual_seed(0 )
        UpperCAmelCase : int = self.dummy_model()
        # .cpu() so the scalar multiply happens on the host before moving to device
        UpperCAmelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        UpperCAmelCase : str = sample.to(snake_case )
        for t in scheduler.timesteps:
            UpperCAmelCase : Union[str, Any] = scheduler.scale_model_input(snake_case , snake_case )
            UpperCAmelCase : List[Any] = model(snake_case , snake_case )
            UpperCAmelCase : List[str] = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case )
            UpperCAmelCase : Dict = output.prev_sample
        UpperCAmelCase : Optional[int] = torch.sum(torch.abs(snake_case ) )
        UpperCAmelCase : Any = torch.mean(torch.abs(snake_case ) )
        assert abs(result_sum.item() - 10.0807 ) < 1e-2
        assert abs(result_mean.item() - 0.0131 ) < 1e-3

    def A_ ( self ):
        '''Full loop with Karras sigma schedule enabled; distinct golden values.'''
        UpperCAmelCase : Dict = self.scheduler_classes[0]
        UpperCAmelCase : Tuple = self.get_scheduler_config()
        UpperCAmelCase : Dict = scheduler_class(**snake_case , use_karras_sigmas=snake_case )
        scheduler.set_timesteps(self.num_inference_steps , device=snake_case )
        UpperCAmelCase : List[str] = torch.manual_seed(0 )
        UpperCAmelCase : Any = self.dummy_model()
        UpperCAmelCase : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        UpperCAmelCase : List[str] = sample.to(snake_case )
        for t in scheduler.timesteps:
            UpperCAmelCase : str = scheduler.scale_model_input(snake_case , snake_case )
            UpperCAmelCase : Dict = model(snake_case , snake_case )
            UpperCAmelCase : Dict = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case )
            UpperCAmelCase : List[str] = output.prev_sample
        UpperCAmelCase : int = torch.sum(torch.abs(snake_case ) )
        UpperCAmelCase : Any = torch.mean(torch.abs(snake_case ) )
        assert abs(result_sum.item() - 124.52_2994_9951_1719 ) < 1e-2
        assert abs(result_mean.item() - 0.1_6213_9326_3339_9963 ) < 1e-3
679
1
'''Benchmark helpers: a timing decorator and dummy-dataset generation utilities.

Fixes vs. the obfuscated original: duplicate ``__magic_name__`` parameter names
(a SyntaxError) are renamed to match the names the bodies actually reference
(``func``, ``features``, ``seq_shapes`` …), and destroyed assignment targets
are restored from those intact references.
'''
import timeit

import numpy as np

import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD


def lowercase(func):
    """Decorator: call ``func`` once and return its wall-clock duration in seconds.

    The wrapped function's own return value is intentionally discarded — only
    the elapsed time (``timeit.default_timer`` delta) is returned, which is all
    the benchmark harness needs.
    """

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)  # result discarded on purpose; we only time it
        delta = timeit.default_timer() - starttime
        return delta

    # keep the original name so benchmark reports stay readable
    wrapper.__name__ = func.__name__
    return wrapper


# Alias so the decorator stays reachable after ``lowercase`` is rebound below.
get_duration = lowercase


def lowercase(features, num_examples=100, seq_shapes=None):
    """Generate ``num_examples`` rows of random dummy data matching ``features``.

    Args:
        features: ``datasets.Features``-like mapping of column name -> feature type.
        num_examples: number of example rows to generate.
        seq_shapes: mapping of column name -> shape, required for ``Sequence`` columns.

    Returns:
        List of ``(index, example_dict)`` tuples.
    """
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                # fixed-shape array feature: random floats cast to the declared dtype
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                # unwrap nested Sequence wrappers down to the base feature
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data
        dummy_data.append((i, example))
    return dummy_data


# Alias used by the dataset writer below (``lowercase`` is rebound next).
generate_examples = lowercase


def lowercase(dataset_path, features, num_examples=100, seq_shapes=None):
    """Write a dummy Arrow dataset to ``dataset_path`` and load it back.

    Raises:
        ValueError: if the writer reports a different number of examples than requested.

    Returns:
        The ``datasets.Dataset`` loaded from the freshly written Arrow file.
    """
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)
    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )
    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
679
'''simple docstring''' import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class UpperCamelCase__ ( lowercase__ ): """simple docstring""" def A_ ( self , snake_case ): '''simple docstring''' with open(snake_case , encoding="utf-8" ) as input_file: UpperCAmelCase : Dict = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" ) UpperCAmelCase : Tuple = input_file.read() UpperCAmelCase : List[Any] = regexp.search(snake_case ) return match def A_ ( self , snake_case ): '''simple docstring''' with open(snake_case , encoding="utf-8" ) as input_file: UpperCAmelCase : List[str] = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL ) UpperCAmelCase : List[Any] = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` UpperCAmelCase : str = regexp.finditer(snake_case ) UpperCAmelCase : Union[str, Any] = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = Path("./datasets" ) UpperCAmelCase : Optional[int] = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(snake_case ) ): raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}" ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = Path("./datasets" ) UpperCAmelCase : Any = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_print_statements(str(snake_case ) ): raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead." )
679
1
'''simple docstring''' def lowercase ( __magic_name__ ): '''simple docstring''' return number & 1 == 0 if __name__ == "__main__": import doctest doctest.testmod()
679
'''BERT with PABEE (Patience-based Early Exit): encoder, backbone, and classification head.

NOTE(review): identifiers are machine-obfuscated.  Several ``def`` headers
repeat the parameter name ``snake_case`` (a SyntaxError as written), and
assignment targets were collapsed to ``UpperCAmelCase`` while references kept
the original names — the module does not run in this form.
'''
import logging

import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
    BERT_INPUTS_DOCSTRING,
    BERT_START_DOCSTRING,
    BertEncoder,
    BertModel,
    BertPreTrainedModel,
)


a : str = logging.getLogger(__name__)


class UpperCamelCase__ ( lowercase__ ):
    """Encoder variant that can run a single layer at a time (used for early exit)."""

    def A_ ( self , snake_case , snake_case , snake_case=None , snake_case=None ):
        '''Run only layer ``current_layer`` over the hidden states and return its output.'''
        UpperCAmelCase : Tuple = self.layer[current_layer](snake_case , snake_case , head_mask[current_layer] )
        UpperCAmelCase : Optional[int] = layer_outputs[0]
        return hidden_states


@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , lowercase__ , )
class UpperCamelCase__ ( lowercase__ ):
    """BERT backbone with patience-based early exit bookkeeping."""

    def __init__( self , snake_case ):
        '''Build the PABEE encoder and zero the patience/exit statistics counters.'''
        super().__init__(snake_case )
        UpperCAmelCase : Dict = BertEncoderWithPabee(snake_case )
        self.init_weights()
        # counters below presumably: patience, inference_instances_num,
        # inference_layers_num, regression_threshold — TODO confirm.
        UpperCAmelCase : int = 0
        UpperCAmelCase : Dict = 0
        UpperCAmelCase : Optional[int] = 0
        UpperCAmelCase : List[Any] = 0

    def A_ ( self , snake_case ):
        '''Set the regression early-exit threshold.'''
        UpperCAmelCase : List[Any] = threshold

    def A_ ( self , snake_case ):
        '''Set the patience (number of consecutive stable predictions before exiting).'''
        UpperCAmelCase : str = patience

    def A_ ( self ):
        '''Reset the inference statistics counters.'''
        UpperCAmelCase : Dict = 0
        UpperCAmelCase : List[Any] = 0

    def A_ ( self ):
        '''Print the average number of layers executed per instance and the speed-up.'''
        UpperCAmelCase : Dict = self.inference_layers_num / self.inference_instances_num
        UpperCAmelCase : List[Any] = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(snake_case )

    @add_start_docstrings_to_model_forward(snake_case )
    def A_ ( self , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=False , ):
        '''Forward pass with three modes: training (all layers, one head per layer),
        patience==0 (plain full-depth inference), and patience>0 (early exit once
        the per-layer prediction is stable for ``patience`` consecutive layers).'''
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
        elif input_ids is not None:
            UpperCAmelCase : Dict = input_ids.size()
        elif inputs_embeds is not None:
            UpperCAmelCase : Any = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds" )
        UpperCAmelCase : Optional[int] = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            UpperCAmelCase : Tuple = torch.ones(snake_case , device=snake_case )
        if token_type_ids is None:
            UpperCAmelCase : List[Any] = torch.zeros(snake_case , dtype=torch.long , device=snake_case )
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        UpperCAmelCase : torch.Tensor = self.get_extended_attention_mask(snake_case , snake_case , snake_case )
        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = encoder_hidden_states.size()
            UpperCAmelCase : List[str] = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                UpperCAmelCase : int = torch.ones(snake_case , device=snake_case )
            UpperCAmelCase : str = self.invert_attention_mask(snake_case )
        else:
            UpperCAmelCase : int = None
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        UpperCAmelCase : Dict = self.get_head_mask(snake_case , self.config.num_hidden_layers )
        UpperCAmelCase : Tuple = self.embeddings(
            input_ids=snake_case , position_ids=snake_case , token_type_ids=snake_case , inputs_embeds=snake_case )
        UpperCAmelCase : int = embedding_output
        if self.training:
            # training: run every layer and collect one classifier output per layer
            UpperCAmelCase : int = []
            for i in range(self.config.num_hidden_layers ):
                UpperCAmelCase : List[Any] = self.encoder.adaptive_forward(
                    snake_case , current_layer=snake_case , attention_mask=snake_case , head_mask=snake_case )
                UpperCAmelCase : Dict = self.pooler(snake_case )
                UpperCAmelCase : List[Any] = output_layers[i](output_dropout(snake_case ) )
                res.append(snake_case )
        elif self.patience == 0:
            # Use all layers for inference
            UpperCAmelCase : Union[str, Any] = self.encoder(
                snake_case , attention_mask=snake_case , head_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , )
            UpperCAmelCase : Optional[int] = self.pooler(encoder_outputs[0] )
            UpperCAmelCase : List[str] = [output_layers[self.config.num_hidden_layers - 1](snake_case )]
        else:
            # patience-based early exit: stop once predictions are stable
            UpperCAmelCase : int = 0
            UpperCAmelCase : Optional[Any] = None
            UpperCAmelCase : Optional[Any] = 0
            for i in range(self.config.num_hidden_layers ):
                calculated_layer_num += 1
                UpperCAmelCase : Tuple = self.encoder.adaptive_forward(
                    snake_case , current_layer=snake_case , attention_mask=snake_case , head_mask=snake_case )
                UpperCAmelCase : Any = self.pooler(snake_case )
                UpperCAmelCase : int = output_layers[i](snake_case )
                if regression:
                    # regression mode: "stable" means the scalar moved less than the threshold
                    UpperCAmelCase : Optional[Any] = logits.detach()
                    if patient_result is not None:
                        UpperCAmelCase : Union[str, Any] = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        UpperCAmelCase : Optional[Any] = 0
                else:
                    # classification mode: "stable" means the argmax did not change
                    UpperCAmelCase : Any = logits.detach().argmax(dim=1 )
                    if patient_result is not None:
                        UpperCAmelCase : Tuple = patient_result.detach().argmax(dim=1 )
                    if (patient_result is not None) and torch.all(labels.eq(snake_case ) ):
                        patient_counter += 1
                    else:
                        UpperCAmelCase : str = 0
                UpperCAmelCase : int = logits
                if patient_counter == self.patience:
                    break
            UpperCAmelCase : int = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1
        return res


@add_start_docstrings(
    "Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , lowercase__ , )
class UpperCamelCase__ ( lowercase__ ):
    """PABEE BERT with one linear classifier per hidden layer (GLUE-style head)."""

    def __init__( self , snake_case ):
        '''Build the backbone plus one per-layer classification head.'''
        super().__init__(snake_case )
        UpperCAmelCase : Union[str, Any] = config.num_labels
        UpperCAmelCase : Optional[Any] = BertModelWithPabee(snake_case )
        UpperCAmelCase : Optional[int] = nn.Dropout(config.hidden_dropout_prob )
        UpperCAmelCase : Any = nn.ModuleList(
            [nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
        self.init_weights()

    @add_start_docstrings_to_model_forward(snake_case )
    def A_ ( self , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , ):
        '''Forward pass; when labels are given, losses from later layers are
        weighted more heavily ((ix + 1) weighting) before averaging.'''
        UpperCAmelCase : int = self.bert(
            input_ids=snake_case , attention_mask=snake_case , token_type_ids=snake_case , position_ids=snake_case , head_mask=snake_case , inputs_embeds=snake_case , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
        UpperCAmelCase : Tuple = (logits[-1],)
        if labels is not None:
            UpperCAmelCase : Optional[int] = None
            UpperCAmelCase : List[Any] = 0
            for ix, logits_item in enumerate(snake_case ):
                if self.num_labels == 1:
                    # We are doing regression
                    UpperCAmelCase : Dict = MSELoss()
                    UpperCAmelCase : Union[str, Any] = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
                else:
                    UpperCAmelCase : Optional[int] = CrossEntropyLoss()
                    UpperCAmelCase : Tuple = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
                if total_loss is None:
                    UpperCAmelCase : int = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            UpperCAmelCase : Tuple = (total_loss / total_weights,) + outputs
        return outputs
679
1
'''Unit tests for TvltProcessor (image processor + audio feature extractor composition).'''
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch

if is_vision_available():
    from transformers import TvltImageProcessor

if is_speech_available():
    from transformers import TvltFeatureExtractor

from transformers import TvltProcessor


@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
    """TvltProcessor tests.

    NOTE(review): every method is obfuscated to ``A_`` (only the last binding
    survives) and assignment targets were collapsed to ``UpperCAmelCase`` while
    references kept original names (``processor``, ``feature_extractor`` …);
    the file does not run as written.
    """

    def A_ ( self ):
        '''Set up: checkpoint name and a temporary save directory (presumably setUp).'''
        UpperCAmelCase : List[str] = "ZinengTang/tvlt-base"
        UpperCAmelCase : List[str] = tempfile.mkdtemp()

    def A_ ( self , **snake_case ):
        '''Load a TvltImageProcessor from the test checkpoint.'''
        return TvltImageProcessor.from_pretrained(self.checkpoint , **snake_case )

    def A_ ( self , **snake_case ):
        '''Load a TvltFeatureExtractor from the test checkpoint.'''
        return TvltFeatureExtractor.from_pretrained(self.checkpoint , **snake_case )

    def A_ ( self ):
        '''Tear down: remove the temporary directory.'''
        shutil.rmtree(self.tmpdirname )

    def A_ ( self ):
        '''Round-trip save_pretrained/from_pretrained preserves component types.'''
        UpperCAmelCase : Any = self.get_image_processor()
        UpperCAmelCase : Optional[Any] = self.get_feature_extractor()
        UpperCAmelCase : int = TvltProcessor(image_processor=snake_case , feature_extractor=snake_case )
        processor.save_pretrained(self.tmpdirname )
        UpperCAmelCase : List[str] = TvltProcessor.from_pretrained(self.tmpdirname )
        self.assertIsInstance(processor.feature_extractor , snake_case )
        self.assertIsInstance(processor.image_processor , snake_case )

    def A_ ( self ):
        '''Processor audio path matches the bare feature extractor output.'''
        UpperCAmelCase : Tuple = self.get_image_processor()
        UpperCAmelCase : Union[str, Any] = self.get_feature_extractor()
        UpperCAmelCase : Dict = TvltProcessor(image_processor=snake_case , feature_extractor=snake_case )
        UpperCAmelCase : Union[str, Any] = np.ones([1_2_0_0_0] )
        UpperCAmelCase : Optional[Any] = feature_extractor(snake_case , return_tensors="np" )
        UpperCAmelCase : Any = processor(audio=snake_case , return_tensors="np" )
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def A_ ( self ):
        '''Processor image path matches the bare image processor output.'''
        UpperCAmelCase : Tuple = self.get_image_processor()
        UpperCAmelCase : int = self.get_feature_extractor()
        UpperCAmelCase : Dict = TvltProcessor(image_processor=snake_case , feature_extractor=snake_case )
        UpperCAmelCase : Tuple = np.ones([3, 2_2_4, 2_2_4] )
        UpperCAmelCase : List[str] = image_processor(snake_case , return_tensors="np" )
        UpperCAmelCase : Tuple = processor(images=snake_case , return_tensors="np" )
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def A_ ( self ):
        '''Joint audio+image call yields all four expected keys; empty call raises.'''
        UpperCAmelCase : Optional[Any] = self.get_image_processor()
        UpperCAmelCase : Optional[Any] = self.get_feature_extractor()
        UpperCAmelCase : int = TvltProcessor(image_processor=snake_case , feature_extractor=snake_case )
        UpperCAmelCase : int = np.ones([1_2_0_0_0] )
        UpperCAmelCase : Dict = np.ones([3, 2_2_4, 2_2_4] )
        UpperCAmelCase : Optional[Any] = processor(audio=snake_case , images=snake_case )
        self.assertListEqual(list(inputs.keys() ) , ["audio_values", "audio_mask", "pixel_values", "pixel_mask"] )
        # test if it raises when no input is passed
        with pytest.raises(snake_case ):
            processor()

    def A_ ( self ):
        '''Processor model_input_names is the concatenation of its components'.'''
        UpperCAmelCase : Optional[int] = self.get_image_processor()
        UpperCAmelCase : str = self.get_feature_extractor()
        UpperCAmelCase : List[str] = TvltProcessor(image_processor=snake_case , feature_extractor=snake_case )
        self.assertListEqual(
            processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="`processor` and `image_processor`+`feature_extractor` model input names do not match" , )
679
'''TensorFlow activation functions and the name -> callable registry.

NOTE(review): identifiers are machine-obfuscated — every function is named
``lowercase`` (each definition shadows the previous), parameters are
``__magic_name__`` while bodies reference the original ``x``/locals, and the
registry keys reference pre-obfuscation names (``gelu``, ``mish`` …).  The
module does not run as written.
'''
import math

import tensorflow as tf
from packaging import version


def lowercase ( __magic_name__ ):
    '''Exact GELU: x * Phi(x) computed via erf.'''
    UpperCAmelCase : str = tf.convert_to_tensor(__magic_name__ )
    UpperCAmelCase : int = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
    return x * cdf


def lowercase ( __magic_name__ ):
    '''Approximate GELU using the tanh formulation (0.044715 cubic term).'''
    UpperCAmelCase : Optional[int] = tf.convert_to_tensor(__magic_name__ )
    UpperCAmelCase : Tuple = tf.cast(math.pi , x.dtype )
    UpperCAmelCase : List[str] = tf.cast(0.0_4_4_7_1_5 , x.dtype )
    UpperCAmelCase : List[Any] = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(__magic_name__ , 3 )) ))
    return x * cdf


def lowercase ( __magic_name__ ):
    '''Mish activation: x * tanh(softplus(x)).'''
    UpperCAmelCase : Tuple = tf.convert_to_tensor(__magic_name__ )
    return x * tf.tanh(tf.math.softplus(__magic_name__ ) )


def lowercase ( __magic_name__ ):
    '''Fast GELU approximation with precomputed sqrt(2/pi) constant.'''
    UpperCAmelCase : int = tf.convert_to_tensor(__magic_name__ )
    UpperCAmelCase : List[str] = tf.cast(0.0_4_4_7_1_5 , x.dtype )
    UpperCAmelCase : int = tf.cast(0.7_9_7_8_8_4_5_6_0_8 , x.dtype )
    return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))


def lowercase ( __magic_name__ ):
    '''Quick GELU: x * sigmoid(1.702 * x).'''
    UpperCAmelCase : int = tf.convert_to_tensor(__magic_name__ )
    UpperCAmelCase : Optional[Any] = tf.cast(1.7_0_2 , x.dtype )
    return x * tf.math.sigmoid(coeff * x )


def lowercase ( __magic_name__ ):
    '''GELU clipped to [-10, 10] (gelu_10).'''
    return tf.clip_by_value(_gelu(__magic_name__ ) , -10 , 10 )


# NOTE(review): the two parameters below share the name ``__magic_name__`` —
# a SyntaxError; presumably the originals were ``x`` and ``axis=-1``.
def lowercase ( __magic_name__ , __magic_name__=-1 ):
    '''GLU: split on the given axis and gate the first half with sigmoid of the second.'''
    UpperCAmelCase , UpperCAmelCase : Dict = tf.split(__magic_name__ , 2 , axis=__magic_name__ )
    return a * tf.math.sigmoid(__magic_name__ )


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    # TF >= 2.4 ships a native (optionally approximate) GELU kernel.
    def lowercase ( __magic_name__ ):
        '''Wrapper around tf.keras.activations.gelu with approximate mode.'''
        return tf.keras.activations.gelu(__magic_name__ , approximate=__magic_name__ )

    a : Tuple = tf.keras.activations.gelu
    a : Dict = approximate_gelu_wrap
else:
    # fall back to the hand-written implementations above
    a : List[str] = _gelu
    a : List[Any] = _gelu_new

# Registry mapping activation-name strings to callables.
a : Optional[int] = {
    "gelu": gelu,
    "gelu_10": gelu_aa,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def lowercase ( __magic_name__ ):
    '''Look up an activation by name; raise KeyError listing valid names otherwise.'''
    if activation_string in ACTaFN:
        return ACTaFN[activation_string]
    else:
        raise KeyError(F"function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}" )
679
1
'''Visual-question-answering pipeline: image + question -> ranked answers.'''
from typing import Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

a : int = logging.get_logger(__name__)


@add_end_docstrings(lowercase__ )
class UpperCamelCase__ ( lowercase__ ):
    """VQA pipeline.

    NOTE(review): several ``def`` headers repeat the parameter name
    ``snake_case`` (``*snake_case, **snake_case`` is a SyntaxError) and
    assignment targets were obfuscated while references kept original names
    (``padding``, ``image``, ``probs`` …); the class does not run as written.
    """

    def __init__( self , *snake_case , **snake_case ):
        '''Delegate to Pipeline and restrict to VQA-capable model classes.'''
        super().__init__(*snake_case , **snake_case )
        self.check_model_type(snake_case )

    def A_ ( self , snake_case=None , snake_case=None , snake_case=None , **snake_case ):
        '''Split call kwargs into (preprocess, forward, postprocess) parameter dicts.'''
        UpperCAmelCase , UpperCAmelCase : List[str] = {}, {}
        if padding is not None:
            UpperCAmelCase : Any = padding
        if truncation is not None:
            UpperCAmelCase : Any = truncation
        if top_k is not None:
            UpperCAmelCase : str = top_k
        return preprocess_params, {}, postprocess_params

    def __call__( self , snake_case , snake_case = None , **snake_case ):
        '''Accept either (image, question) or a dict/list of {"image", "question"} inputs.'''
        if isinstance(snake_case , (Image.Image, str) ) and isinstance(snake_case , snake_case ):
            UpperCAmelCase : Tuple = {"image": image, "question": question}
        else:
            # assumed already in {"image": ..., "question": ...} form
            UpperCAmelCase : str = image
        UpperCAmelCase : str = super().__call__(snake_case , **snake_case )
        return results

    def A_ ( self , snake_case , snake_case=False , snake_case=False ):
        '''Preprocess: load the image, tokenize the question, extract pixel features.'''
        UpperCAmelCase : List[Any] = load_image(inputs["image"] )
        UpperCAmelCase : Optional[Any] = self.tokenizer(
            inputs["question"] , return_tensors=self.framework , padding=snake_case , truncation=snake_case )
        UpperCAmelCase : Optional[Any] = self.image_processor(images=snake_case , return_tensors=self.framework )
        model_inputs.update(snake_case )
        return model_inputs

    def A_ ( self , snake_case ):
        '''Forward: run the model on the prepared inputs.'''
        UpperCAmelCase : Dict = self.model(**snake_case )
        return model_outputs

    def A_ ( self , snake_case , snake_case=5 ):
        '''Postprocess: sigmoid the logits and return the top_k {score, answer} dicts.'''
        if top_k > self.model.config.num_labels:
            # clamp top_k to the number of labels the model actually has
            UpperCAmelCase : Tuple = self.model.config.num_labels
        if self.framework == "pt":
            UpperCAmelCase : Optional[int] = model_outputs.logits.sigmoid()[0]
            UpperCAmelCase , UpperCAmelCase : str = probs.topk(snake_case )
        else:
            raise ValueError(f"Unsupported framework: {self.framework}" )
        UpperCAmelCase : Optional[int] = scores.tolist()
        UpperCAmelCase : List[Any] = ids.tolist()
        return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(snake_case , snake_case )]
679
'''simple docstring''' from __future__ import annotations class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case ): '''simple docstring''' UpperCAmelCase : str = order # a_{0} ... a_{k} UpperCAmelCase : Optional[int] = [1.0] + [0.0] * order # b_{0} ... b_{k} UpperCAmelCase : List[Any] = [1.0] + [0.0] * order # x[n-1] ... x[n-k] UpperCAmelCase : Dict = [0.0] * self.order # y[n-1] ... y[n-k] UpperCAmelCase : Optional[Any] = [0.0] * self.order def A_ ( self , snake_case , snake_case ): '''simple docstring''' if len(snake_case ) < self.order: UpperCAmelCase : Dict = [1.0, *a_coeffs] if len(snake_case ) != self.order + 1: UpperCAmelCase : Optional[Any] = ( f"Expected a_coeffs to have {self.order + 1} elements " f"for {self.order}-order filter, got {len(snake_case )}" ) raise ValueError(snake_case ) if len(snake_case ) != self.order + 1: UpperCAmelCase : Optional[Any] = ( f"Expected b_coeffs to have {self.order + 1} elements " f"for {self.order}-order filter, got {len(snake_case )}" ) raise ValueError(snake_case ) UpperCAmelCase : Optional[int] = a_coeffs UpperCAmelCase : Optional[Any] = b_coeffs def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : Optional[Any] = 0.0 # Start at index 1 and do index 0 at the end. for i in range(1 , self.order + 1 ): result += ( self.b_coeffs[i] * self.input_history[i - 1] - self.a_coeffs[i] * self.output_history[i - 1] ) UpperCAmelCase : Optional[int] = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0] UpperCAmelCase : List[str] = self.input_history[:-1] UpperCAmelCase : List[Any] = self.output_history[:-1] UpperCAmelCase : str = sample UpperCAmelCase : str = result return result
679
1
'''Radix (compressed prefix) tree with insert/find/delete and a self-test.

Fixes vs. the obfuscated original: the class was defined as ``UpperCamelCase__``
while every call site used ``RadixNode``; all methods were defined as ``A_``
(shadowing each other) while call sites used ``match``/``insert``/``find``/
``delete``/``print_tree``; ``__init__`` had two parameters both named
``snake_case`` (a SyntaxError); and the ``__main__`` guard called an undefined
``main``.  Names are restored from the intact call sites, with aliases kept
for the obfuscated bindings.
'''


class RadixNode:
    """One node of a radix tree; edges are keyed by the first character of each child's prefix."""

    def __init__(self, prefix="", is_leaf=False):
        # mapping: first character -> child RadixNode
        self.nodes = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word):
        """Compare ``word`` with this node's prefix.

        Returns a 3-tuple: (common part, remaining prefix, remaining word).
        """
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words):
        """Insert every word of the iterable into the tree."""
        for word in words:
            self.insert(word)

    def insert(self, word):
        """Insert ``word`` into the tree rooted at this node."""
        # Case 1: the word equals this node's prefix -> mark it a leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word):
        """Return True if ``word`` is stored in the tree."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word):
        """Remove ``word`` from the tree; return True if it was present."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True

    def print_tree(self, height=0):
        """Pretty-print the subtree, one dash per depth level."""
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)


# Backward-compat alias for the obfuscated class name.
UpperCamelCase__ = RadixNode


def test_trie():
    """Exercise insert/find/delete; return True when all checks pass."""
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)
    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def pytests():
    """Entry point for pytest-style collection of the self-test."""
    assert test_trie()


def main():
    """Demo: build a small tree and print it."""
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)
    print("Words:", words)
    print("Tree:")
    root.print_tree()


# Backward-compat: the obfuscated module's last function binding was ``lowercase``.
lowercase = main

if __name__ == "__main__":
    main()
'''simple docstring''' import argparse from collections import defaultdict def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : str = F"{file}_{class_name}_{test_name}" done_test[_id] += 1 with open(__magic_name__ , "r" ) as f: UpperCAmelCase : Tuple = f.readlines() UpperCAmelCase : Tuple = F"class {class_name}(" UpperCAmelCase : str = F"{4 * ' '}def {test_name}(" UpperCAmelCase : Dict = F"{8 * ' '}{correct_line.split()[0]}" UpperCAmelCase : Tuple = F"{16 * ' '}{correct_line.split()[0]}" UpperCAmelCase : Optional[int] = False UpperCAmelCase : List[str] = False UpperCAmelCase : Union[str, Any] = False UpperCAmelCase : Dict = False UpperCAmelCase : Tuple = 0 UpperCAmelCase : int = 0 UpperCAmelCase : Tuple = [] for line in lines: if line.startswith(__magic_name__ ): UpperCAmelCase : int = True elif in_class and line.startswith(__magic_name__ ): UpperCAmelCase : Dict = True elif in_class and in_func and (line.startswith(__magic_name__ ) or line.startswith(__magic_name__ )): UpperCAmelCase : List[str] = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: UpperCAmelCase : List[str] = True if in_class and in_func and in_line: if ")" not in line: continue else: UpperCAmelCase : List[str] = True if in_class and in_func and in_line and insert_line: new_lines.append(F"{spaces * ' '}{correct_line}" ) UpperCAmelCase : List[str] = False else: new_lines.append(__magic_name__ ) with open(__magic_name__ , "w" ) as f: for line in new_lines: f.write(__magic_name__ ) def lowercase ( __magic_name__ , __magic_name__=None ): '''simple docstring''' if fail is not None: with open(__magic_name__ , "r" ) as f: UpperCAmelCase : Optional[int] = {l.strip() for l in f.readlines()} else: UpperCAmelCase : Any = None with open(__magic_name__ , "r" ) as f: UpperCAmelCase : Tuple = f.readlines() UpperCAmelCase : int = defaultdict(__magic_name__ ) for line in correct_lines: 
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = line.split(";" ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) if __name__ == "__main__": a : str = argparse.ArgumentParser() parser.add_argument("--correct_filename", help="filename of tests with expected result") parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None) a : List[Any] = parser.parse_args() main(args.correct_filename, args.fail_filename)
679
1
"""Ford-Fulkerson maximum flow (Edmonds-Karp: BFS augmenting paths).

Review note: the obfuscated chunk defined both functions as ``lowercase``
while calling ``bfs`` and ``ford_fulkerson`` (NameErrors), and lost the
assignment targets ``visited[ind]`` / ``parent[ind]``. Restored; also
switched the BFS frontier to ``deque`` (``list.pop(0)`` is O(n)) and put
the demo print behind a ``__main__`` guard so importing has no side effect.
"""
from collections import deque


def bfs(graph, source, sink, parent):
    """Breadth-first search over positive residual capacities.

    Fills ``parent`` (``parent[v]`` = predecessor of ``v`` on the path) and
    returns True iff ``sink`` is reachable from ``source``.
    """
    visited = [False] * len(graph)
    queue = deque([source])
    visited[source] = True
    while queue:
        u = queue.popleft()
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def ford_fulkerson(graph, source, sink):
    """Return the maximum ``source``->``sink`` flow of capacity matrix ``graph``.

    NOTE: ``graph`` is mutated in place -- it ends up holding the residual
    capacities.
    """
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        # Bottleneck capacity along the augmenting path found by BFS.
        path_flow = float("Inf")
        s = sink
        while s != source:
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update residuals: subtract along the path, add on reverse edges.
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5

if __name__ == "__main__":
    print(ford_fulkerson(graph, source, sink))
679
'''simple docstring''' from __future__ import annotations from collections import namedtuple from dataclasses import dataclass @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : TreeNode | None = None SCREAMING_SNAKE_CASE__ : TreeNode | None = None a : Optional[Any] = namedtuple("CoinsDistribResult", "moves excess") def lowercase ( __magic_name__ ): '''simple docstring''' if root is None: return 0 # Validation def count_nodes(__magic_name__ ) -> int: if node is None: return 0 return count_nodes(node.left ) + count_nodes(node.right ) + 1 def count_coins(__magic_name__ ) -> int: if node is None: return 0 return count_coins(node.left ) + count_coins(node.right ) + node.data if count_nodes(__magic_name__ ) != count_coins(__magic_name__ ): raise ValueError("The nodes number should be same as the number of coins" ) # Main calculation def get_distrib(__magic_name__ ) -> CoinsDistribResult: if node is None: return CoinsDistribResult(0 , 1 ) UpperCAmelCase , UpperCAmelCase : Optional[Any] = get_distrib(node.left ) UpperCAmelCase , UpperCAmelCase : Any = get_distrib(node.right ) UpperCAmelCase : Optional[Any] = 1 - left_distrib_excess UpperCAmelCase : int = 1 - right_distrib_excess UpperCAmelCase : List[Any] = ( left_distrib_moves + right_distrib_moves + abs(__magic_name__ ) + abs(__magic_name__ ) ) UpperCAmelCase : List[Any] = node.data - coins_to_left - coins_to_right return CoinsDistribResult(__magic_name__ , __magic_name__ ) return get_distrib(__magic_name__ )[0] if __name__ == "__main__": import doctest doctest.testmod()
679
1
# NOTE(review): obfuscated copy of transformers' TF Blenderbot test module
# (model tester, `prepare_blenderbot_inputs_dict`, and the test classes).
# Identifiers have been machine-mangled: assignment targets collapsed to a
# repeated `UpperCAmelCase`, methods renamed `A_`, classes renamed
# `UpperCamelCase__`, mixins referenced as `lowercase__` -- so the block is
# not runnable as-is and depends on tensorflow/transformers, which are not
# available here. Left byte-identical pending restoration against upstream.
'''simple docstring''' from __future__ import annotations import unittest from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel @require_tf class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = BlenderbotConfig SCREAMING_SNAKE_CASE__ : int = {} SCREAMING_SNAKE_CASE__ : Dict = "gelu" def __init__( self , snake_case , snake_case=1_3 , snake_case=7 , snake_case=True , snake_case=False , snake_case=9_9 , snake_case=3_2 , snake_case=2 , snake_case=4 , snake_case=3_7 , snake_case=0.1 , snake_case=0.1 , snake_case=2_0 , snake_case=2 , snake_case=1 , snake_case=0 , ): '''simple docstring''' UpperCAmelCase : Optional[int] = parent UpperCAmelCase : Optional[Any] = batch_size UpperCAmelCase : Union[str, Any] = seq_length UpperCAmelCase : Any = is_training UpperCAmelCase : Union[str, Any] = use_labels UpperCAmelCase : str = vocab_size UpperCAmelCase : str = hidden_size UpperCAmelCase : Optional[int] = num_hidden_layers UpperCAmelCase : Optional[int] = num_attention_heads UpperCAmelCase : Dict = intermediate_size UpperCAmelCase : Any = hidden_dropout_prob UpperCAmelCase : int = attention_probs_dropout_prob UpperCAmelCase : List[Any] = max_position_embeddings UpperCAmelCase : Tuple = eos_token_id UpperCAmelCase : Union[str, Any] = pad_token_id UpperCAmelCase : Any = bos_token_id def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) UpperCAmelCase : str = 
tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) UpperCAmelCase : List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 ) UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : List[str] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) UpperCAmelCase : Tuple = prepare_blenderbot_inputs_dict(snake_case , snake_case , snake_case ) return config, inputs_dict def A_ ( self , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : List[str] = TFBlenderbotModel(config=snake_case ).get_decoder() UpperCAmelCase : Union[str, Any] = inputs_dict["input_ids"] UpperCAmelCase : int = input_ids[:1, :] UpperCAmelCase : Union[str, Any] = inputs_dict["attention_mask"][:1, :] UpperCAmelCase : int = inputs_dict["head_mask"] UpperCAmelCase : str = 1 # first forward pass UpperCAmelCase : str = model(snake_case , attention_mask=snake_case , head_mask=snake_case , use_cache=snake_case ) UpperCAmelCase , UpperCAmelCase : str = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids UpperCAmelCase : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase : str = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and UpperCAmelCase : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 ) UpperCAmelCase : Tuple = tf.concat([attention_mask, 
next_attn_mask] , axis=-1 ) UpperCAmelCase : Any = model(snake_case , attention_mask=snake_case )[0] UpperCAmelCase : List[Any] = model(snake_case , attention_mask=snake_case , past_key_values=snake_case )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice UpperCAmelCase : Dict = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) UpperCAmelCase : Tuple = output_from_no_past[:, -3:, random_slice_idx] UpperCAmelCase : Any = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(snake_case , snake_case , rtol=1e-3 ) def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , ): '''simple docstring''' if attention_mask is None: UpperCAmelCase : Any = tf.cast(tf.math.not_equal(__magic_name__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: UpperCAmelCase : str = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: UpperCAmelCase : str = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: UpperCAmelCase : List[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: UpperCAmelCase : Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class UpperCamelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : str = (TFBlenderbotForConditionalGeneration, 
TFBlenderbotModel) if is_tf_available() else () SCREAMING_SNAKE_CASE__ : Union[str, Any] = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else () SCREAMING_SNAKE_CASE__ : Dict = ( { "conversational": TFBlenderbotForConditionalGeneration, "feature-extraction": TFBlenderbotModel, "summarization": TFBlenderbotForConditionalGeneration, "text2text-generation": TFBlenderbotForConditionalGeneration, "translation": TFBlenderbotForConditionalGeneration, } if is_tf_available() else {} ) SCREAMING_SNAKE_CASE__ : int = True SCREAMING_SNAKE_CASE__ : List[str] = False SCREAMING_SNAKE_CASE__ : Optional[int] = False def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = TFBlenderbotModelTester(self ) UpperCAmelCase : Dict = ConfigTester(self , config_class=snake_case ) def A_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() def A_ ( self ): '''simple docstring''' UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*snake_case ) @require_tokenizers @require_tf class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = ["My friends are cool but they eat too many carbs."] SCREAMING_SNAKE_CASE__ : Union[str, Any] = "facebook/blenderbot-400M-distill" @cached_property def A_ ( self ): '''simple docstring''' return BlenderbotTokenizer.from_pretrained(self.model_name ) @cached_property def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[int] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = self.tokenizer(self.src_text , return_tensors="tf" ) UpperCAmelCase : str = self.model.generate( model_inputs.input_ids , ) UpperCAmelCase : int = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=snake_case )[0] assert ( generated_words == " That's unfortunate. 
Are they trying to lose weight or are they just trying to be healthier?" )
679
# NOTE(review): obfuscated copy of transformers' fast LED tokenizer
# (`tokenization_led_fast.py`). Identifiers are machine-mangled (assignment
# targets collapsed to `UpperCAmelCase`, methods renamed `A_`, module
# constants bound to `a` while the class body references the real names),
# so the block is not runnable as-is and depends on the `tokenizers` /
# `transformers` packages, which are unavailable here. Left byte-identical
# pending restoration against the upstream file.
'''simple docstring''' import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer a : List[Any] = logging.get_logger(__name__) a : List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} a : int = { "vocab_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json", }, "merges_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt", }, "tokenizer_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json", }, } a : Any = { "allenai/led-base-16384": 1_63_84, } class UpperCamelCase__ ( lowercase__ ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ : Tuple = LEDTokenizer SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["input_ids", "attention_mask"] def __init__( self , snake_case=None , snake_case=None , snake_case=None , snake_case="replace" , snake_case="<s>" , snake_case="</s>" , snake_case="</s>" , snake_case="<s>" , snake_case="<unk>" , snake_case="<pad>" , snake_case="<mask>" , snake_case=False , snake_case=True , **snake_case , ): '''simple docstring''' super().__init__( snake_case , snake_case , tokenizer_file=snake_case , errors=snake_case , bos_token=snake_case , eos_token=snake_case , sep_token=snake_case , cls_token=snake_case , unk_token=snake_case , pad_token=snake_case , mask_token=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case , **snake_case , ) 
UpperCAmelCase : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , snake_case ) != add_prefix_space: UpperCAmelCase : Tuple = getattr(snake_case , pre_tok_state.pop("type" ) ) UpperCAmelCase : Any = add_prefix_space UpperCAmelCase : str = pre_tok_class(**snake_case ) UpperCAmelCase : int = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` UpperCAmelCase : Dict = "post_processor" UpperCAmelCase : Dict = getattr(self.backend_tokenizer , snake_case , snake_case ) if tokenizer_component_instance: UpperCAmelCase : List[str] = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: UpperCAmelCase : int = tuple(state["sep"] ) if "cls" in state: UpperCAmelCase : Union[str, Any] = tuple(state["cls"] ) UpperCAmelCase : Tuple = False if state.get("add_prefix_space" , snake_case ) != add_prefix_space: UpperCAmelCase : Optional[Any] = add_prefix_space UpperCAmelCase : Optional[int] = True if state.get("trim_offsets" , snake_case ) != trim_offsets: UpperCAmelCase : Tuple = trim_offsets UpperCAmelCase : List[str] = True if changes_to_apply: UpperCAmelCase : Optional[Any] = getattr(snake_case , state.pop("type" ) ) UpperCAmelCase : Tuple = component_class(**snake_case ) setattr(self.backend_tokenizer , snake_case , snake_case ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def A_ ( self ): '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." 
) return None return str(self._mask_token ) @mask_token.setter def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : Tuple = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else value UpperCAmelCase : Optional[Any] = value def A_ ( self , *snake_case , **snake_case ): '''simple docstring''' UpperCAmelCase : List[str] = kwargs.get("is_split_into_words" , snake_case ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*snake_case , **snake_case ) def A_ ( self , *snake_case , **snake_case ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = kwargs.get("is_split_into_words" , snake_case ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." 
) return super()._encode_plus(*snake_case , **snake_case ) def A_ ( self , snake_case , snake_case = None ): '''simple docstring''' UpperCAmelCase : str = self._tokenizer.model.save(snake_case , name=snake_case ) return tuple(snake_case ) def A_ ( self , snake_case , snake_case=None ): '''simple docstring''' UpperCAmelCase : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def A_ ( self , snake_case , snake_case = None ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = [self.sep_token_id] UpperCAmelCase : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def A_ ( self , snake_case , snake_case = None , snake_case = PaddingStrategy.DO_NOT_PAD , snake_case = None , snake_case = None , ): '''simple docstring''' UpperCAmelCase : int = super()._pad( encoded_inputs=snake_case , max_length=snake_case , padding_strategy=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , ) # Load from model defaults if return_attention_mask is None: UpperCAmelCase : int = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: UpperCAmelCase : Union[str, Any] = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
UpperCAmelCase : Optional[int] = len(encoded_inputs["global_attention_mask"] ) != len(snake_case ) if needs_to_be_padded: UpperCAmelCase : Tuple = len(snake_case ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` UpperCAmelCase : List[str] = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": UpperCAmelCase : Any = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
679
1
'''simple docstring''' from __future__ import annotations from collections.abc import Iterator from typing import Generic, TypeVar a : Any = TypeVar("T") class UpperCamelCase__ ( Generic[T] ): """simple docstring""" def __init__( self , snake_case ): '''simple docstring''' UpperCAmelCase : Optional[int] = data UpperCAmelCase : Node[T] | None = None def __str__( self ): '''simple docstring''' return f"{self.data}" class UpperCamelCase__ ( Generic[T] ): """simple docstring""" def __init__( self ): '''simple docstring''' UpperCAmelCase : Node[T] | None = None def __iter__( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = self.top while node: yield node.data UpperCAmelCase : int = node.next def __str__( self ): '''simple docstring''' return "->".join([str(snake_case ) for item in self] ) def __len__( self ): '''simple docstring''' return len(tuple(iter(self ) ) ) def A_ ( self ): '''simple docstring''' return self.top is None def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : List[Any] = Node(snake_case ) if not self.is_empty(): UpperCAmelCase : Union[str, Any] = self.top UpperCAmelCase : str = node def A_ ( self ): '''simple docstring''' if self.is_empty(): raise IndexError("pop from empty stack" ) assert isinstance(self.top , snake_case ) UpperCAmelCase : Optional[int] = self.top UpperCAmelCase : Any = self.top.next return pop_node.data def A_ ( self ): '''simple docstring''' if self.is_empty(): raise IndexError("peek from empty stack" ) assert self.top is not None return self.top.data def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = None if __name__ == "__main__": from doctest import testmod testmod()
679
# NOTE(review): obfuscated copy of transformers' agent-type tests
# (AgentAudio / AgentImage / AgentText round-trip checks). Identifiers are
# machine-mangled (assignment targets collapsed to `UpperCAmelCase`, every
# test method renamed `A_`, classes renamed `UpperCamelCase__`), and the
# code depends on torch / transformers / soundfile / PIL, none of which are
# available here. Left byte-identical pending restoration against upstream.
'''simple docstring''' import os import tempfile import unittest import uuid from pathlib import Path from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available if is_torch_available(): import torch if is_soundfile_availble(): import soundfile as sf if is_vision_available(): from PIL import Image def lowercase ( __magic_name__="" ): '''simple docstring''' UpperCAmelCase : Dict = tempfile.mkdtemp() return os.path.join(__magic_name__ , str(uuid.uuida() ) + suffix ) @require_soundfile @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = torch.rand(1_2 , dtype=torch.floataa ) - 0.5 UpperCAmelCase : int = AgentAudio(snake_case ) UpperCAmelCase : str = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(snake_case , agent_type.to_raw() , atol=1e-4 ) ) del agent_type # Ensure the path remains even after the object deletion self.assertTrue(os.path.exists(snake_case ) ) # Ensure that the file contains the same value as the original tensor UpperCAmelCase , UpperCAmelCase : str = sf.read(snake_case ) self.assertTrue(torch.allclose(snake_case , torch.tensor(snake_case ) , atol=1e-4 ) ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = torch.rand(1_2 , dtype=torch.floataa ) - 0.5 UpperCAmelCase : Any = get_new_path(suffix=".wav" ) sf.write(snake_case , snake_case , 1_6_0_0_0 ) UpperCAmelCase : Optional[Any] = AgentAudio(snake_case ) self.assertTrue(torch.allclose(snake_case , agent_type.to_raw() , atol=1e-4 ) ) self.assertEqual(agent_type.to_string() , snake_case ) @require_vision @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self 
): '''simple docstring''' UpperCAmelCase : Optional[Any] = torch.randint(0 , 2_5_6 , (6_4, 6_4, 3) ) UpperCAmelCase : Tuple = AgentImage(snake_case ) UpperCAmelCase : Tuple = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(snake_case , agent_type._tensor , atol=1e-4 ) ) self.assertIsInstance(agent_type.to_raw() , Image.Image ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(snake_case ) ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" UpperCAmelCase : Any = Image.open(snake_case ) UpperCAmelCase : List[str] = AgentImage(snake_case ) self.assertTrue(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(snake_case ) ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" UpperCAmelCase : Dict = Image.open(snake_case ) UpperCAmelCase : int = AgentImage(snake_case ) self.assertFalse(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(snake_case ) ) class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = "Hey!" UpperCAmelCase : Tuple = AgentText(snake_case ) self.assertEqual(snake_case , agent_type.to_string() ) self.assertEqual(snake_case , agent_type.to_raw() ) self.assertEqual(snake_case , snake_case )
679
1
"""Find all bridges of an undirected graph (Tarjan low-link DFS).

Review note: the obfuscated chunk defined both functions as ``lowercase``
(the second shadowing the first) and dropped the assignment targets
``visited[at]`` / ``low[at]``. Restored to a working implementation.
"""


def get_demo_graph(index: int) -> dict[int, list[int]]:
    """Return one of four hard-coded undirected demo graphs (adjacency lists)."""
    return [
        {
            0: [1, 2],
            1: [0, 2],
            2: [0, 1, 3, 5],
            3: [2, 4],
            4: [3],
            5: [2, 6, 8],
            6: [5, 7],
            7: [6, 8],
            8: [5, 7],
        },
        {
            0: [6],
            1: [9],
            2: [4, 5],
            3: [4],
            4: [2, 3],
            5: [2],
            6: [0, 7],
            7: [6],
            8: [],
            9: [1],
        },
        {
            0: [4],
            1: [6],
            2: [],
            3: [5, 6, 7],
            4: [0, 6],
            5: [3, 8, 9],
            6: [1, 3, 4, 7],
            7: [3, 6, 8, 9],
            8: [5, 7],
            9: [5, 7],
        },
        {
            0: [1, 3],
            1: [0, 2, 4],
            2: [1, 3, 4],
            3: [0, 2, 4],
            4: [1, 2, 3],
        },
    ][index]


def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    """Return every bridge of ``graph`` as sorted vertex pairs.

    A tree edge (at, to) is a bridge when no back edge from ``to``'s DFS
    subtree reaches ``at`` or above. Disconnected graphs are handled by
    restarting the DFS from every unvisited vertex.
    """
    id_ = 0
    n = len(graph)  # number of vertices
    low = [0] * n
    visited = [False] * n

    def dfs(at: int, parent: int, bridges: list[tuple[int, int]], id_: int) -> None:
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge.
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges


if __name__ == "__main__":
    import doctest

    doctest.testmod()
679
# NOTE(review): obfuscated copy of transformers' token-dropping BERT
# TF->PyTorch checkpoint conversion script. Identifiers are machine-mangled
# (assignment targets collapsed to `UpperCAmelCase`, the top-level function
# renamed `lowercase` although `__main__` calls
# `convert_checkpoint_to_pytorch`), and the script depends on tensorflow,
# which is unavailable here. Left byte-identical pending restoration
# against the upstream file.
'''simple docstring''' import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' def get_masked_lm_array(__magic_name__ ): UpperCAmelCase : Tuple = F"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE" UpperCAmelCase : List[str] = tf.train.load_variable(__magic_name__ , __magic_name__ ) if "kernel" in name: UpperCAmelCase : str = array.transpose() return torch.from_numpy(__magic_name__ ) def get_encoder_array(__magic_name__ ): UpperCAmelCase : List[Any] = F"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE" UpperCAmelCase : Optional[Any] = tf.train.load_variable(__magic_name__ , __magic_name__ ) if "kernel" in name: UpperCAmelCase : str = array.transpose() return torch.from_numpy(__magic_name__ ) def get_encoder_layer_array(__magic_name__ , __magic_name__ ): UpperCAmelCase : Union[str, Any] = F"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE" UpperCAmelCase : int = tf.train.load_variable(__magic_name__ , __magic_name__ ) if "kernel" in name: UpperCAmelCase : Optional[int] = array.transpose() return torch.from_numpy(__magic_name__ ) def get_encoder_attention_layer_array(__magic_name__ , __magic_name__ , __magic_name__ ): UpperCAmelCase : Tuple = F"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE" UpperCAmelCase : List[str] = tf.train.load_variable(__magic_name__ , __magic_name__ ) UpperCAmelCase : int = array.reshape(__magic_name__ ) if "kernel" in name: UpperCAmelCase : Optional[Any] = array.transpose() return torch.from_numpy(__magic_name__ ) print(F"Loading model based on config from {config_path}..." 
) UpperCAmelCase : Optional[Any] = BertConfig.from_json_file(__magic_name__ ) UpperCAmelCase : Optional[Any] = BertForMaskedLM(__magic_name__ ) # Layers for layer_index in range(0 , config.num_hidden_layers ): UpperCAmelCase : BertLayer = model.bert.encoder.layer[layer_index] # Self-attention UpperCAmelCase : BertSelfAttention = layer.attention.self UpperCAmelCase : List[Any] = get_encoder_attention_layer_array( __magic_name__ , "_query_dense/kernel" , self_attn.query.weight.data.shape ) UpperCAmelCase : Tuple = get_encoder_attention_layer_array( __magic_name__ , "_query_dense/bias" , self_attn.query.bias.data.shape ) UpperCAmelCase : int = get_encoder_attention_layer_array( __magic_name__ , "_key_dense/kernel" , self_attn.key.weight.data.shape ) UpperCAmelCase : Optional[int] = get_encoder_attention_layer_array( __magic_name__ , "_key_dense/bias" , self_attn.key.bias.data.shape ) UpperCAmelCase : Tuple = get_encoder_attention_layer_array( __magic_name__ , "_value_dense/kernel" , self_attn.value.weight.data.shape ) UpperCAmelCase : str = get_encoder_attention_layer_array( __magic_name__ , "_value_dense/bias" , self_attn.value.bias.data.shape ) # Self-attention Output UpperCAmelCase : BertSelfOutput = layer.attention.output UpperCAmelCase : str = get_encoder_attention_layer_array( __magic_name__ , "_output_dense/kernel" , self_output.dense.weight.data.shape ) UpperCAmelCase : Union[str, Any] = get_encoder_attention_layer_array( __magic_name__ , "_output_dense/bias" , self_output.dense.bias.data.shape ) UpperCAmelCase : str = get_encoder_layer_array(__magic_name__ , "_attention_layer_norm/gamma" ) UpperCAmelCase : List[str] = get_encoder_layer_array(__magic_name__ , "_attention_layer_norm/beta" ) # Intermediate UpperCAmelCase : BertIntermediate = layer.intermediate UpperCAmelCase : Dict = get_encoder_layer_array(__magic_name__ , "_intermediate_dense/kernel" ) UpperCAmelCase : Tuple = get_encoder_layer_array(__magic_name__ , "_intermediate_dense/bias" ) # Output 
UpperCAmelCase : BertOutput = layer.output UpperCAmelCase : Optional[Any] = get_encoder_layer_array(__magic_name__ , "_output_dense/kernel" ) UpperCAmelCase : Optional[Any] = get_encoder_layer_array(__magic_name__ , "_output_dense/bias" ) UpperCAmelCase : List[str] = get_encoder_layer_array(__magic_name__ , "_output_layer_norm/gamma" ) UpperCAmelCase : Any = get_encoder_layer_array(__magic_name__ , "_output_layer_norm/beta" ) # Embeddings UpperCAmelCase : int = get_encoder_array("_position_embedding_layer/embeddings" ) UpperCAmelCase : str = get_encoder_array("_type_embedding_layer/embeddings" ) UpperCAmelCase : Optional[Any] = get_encoder_array("_embedding_norm_layer/gamma" ) UpperCAmelCase : Any = get_encoder_array("_embedding_norm_layer/beta" ) # LM Head UpperCAmelCase : str = model.cls.predictions.transform UpperCAmelCase : List[Any] = get_masked_lm_array("dense/kernel" ) UpperCAmelCase : List[Any] = get_masked_lm_array("dense/bias" ) UpperCAmelCase : Optional[Any] = get_masked_lm_array("layer_norm/gamma" ) UpperCAmelCase : Union[str, Any] = get_masked_lm_array("layer_norm/beta" ) UpperCAmelCase : Optional[Any] = get_masked_lm_array("embedding_table" ) # Pooling UpperCAmelCase : str = BertPooler(config=__magic_name__ ) UpperCAmelCase : BertPooler = get_encoder_array("_pooler_layer/kernel" ) UpperCAmelCase : BertPooler = get_encoder_array("_pooler_layer/bias" ) # Export final model model.save_pretrained(__magic_name__ ) # Integration test - should load without any errors ;) UpperCAmelCase : Optional[int] = BertForMaskedLM.from_pretrained(__magic_name__ ) print(new_model.eval() ) print("Model conversion was done sucessfully!" ) if __name__ == "__main__": a : Tuple = argparse.ArgumentParser() parser.add_argument( "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path." ) parser.add_argument( "--bert_config_file", type=str, required=True, help="The config json file corresponding to the BERT model. 
This specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", type=str, required=True, help="Path to the output PyTorch model.", ) a : Any = parser.parse_args() convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
679
1
"""Tokenization class for BlenderbotSmall: whitespace/punctuation pre-split + lowercase BPE."""
import json
import os
from typing import Dict, List, Optional, Tuple

import regex as re

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}


def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs


class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    """
    BPE tokenizer built from a vocab.json / merges.txt pair. Tokens are lowercased,
    sub-word continuations are marked with a trailing "@@".
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            # First line of merges.txt is a version header; last entry is the trailing newline.
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        # Rank of each merge = its position in merges.txt (lower rank merges first).
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        """Apply the merge rules to `token`, returning space-separated BPE pieces."""
        if token in self.cache:
            return self.cache[token]
        # Split punctuation / apostrophes off into their own whitespace-separated tokens.
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            # Mark the end-of-word symbol so merges can distinguish word-final pieces.
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                # Always apply the lowest-ranked (earliest-learned) merge first.
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))

                if bigram not in self.bpe_ranks:
                    break

                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1

                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            # Strip the trailing "</w>" marker from the final piece.
            word = word[:-4]

            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        """Split `text` into BPE tokens."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token (str) to an id using the vocab (unknowns map to unk_token's id)."""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an index (int) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Join tokens and undo the "@@ " continuation markers."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt into `save_directory`; return their paths."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            # Emit merges in rank order so reloading reproduces identical bpe_ranks.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
679
"""Sanity checks for transformers' lazy-init files: the `_import_structure` half and the
TYPE_CHECKING half of every `__init__.py` must declare the same objects, and every
submodule must be registered in the main init."""
import collections
import importlib.util
import os
import re
from pathlib import Path


PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Return the normalized backend name ("x_and_y") guarded by `line`, or None."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """
    Read `init_file` and return a pair of dicts mapping backend name -> declared objects:
    one for the `_import_structure` half, one for the TYPE_CHECKING half. Returns None for
    traditional (non-lazy) inits.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}

    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}

    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of an init; return a list of human-readable error strings."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """Walk the source tree and raise if any lazy init has mismatched halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """Return the dotted names of all top-level submodules of transformers."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]


def check_submodules():
    """Raise if any submodule is missing from the main init's `_import_structure`."""
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
679
1
'''simple docstring''' def lowercase ( __magic_name__ ): '''simple docstring''' try: UpperCAmelCase : int = float(__magic_name__ ) except ValueError: raise ValueError("Please enter a valid number" ) UpperCAmelCase : Tuple = decimal - int(__magic_name__ ) if fractional_part == 0: return int(__magic_name__ ), 1 else: UpperCAmelCase : List[Any] = len(str(__magic_name__ ).split("." )[1] ) UpperCAmelCase : str = int(decimal * (10**number_of_frac_digits) ) UpperCAmelCase : str = 10**number_of_frac_digits UpperCAmelCase , UpperCAmelCase : str = denominator, numerator while True: UpperCAmelCase : Optional[int] = dividend % divisor if remainder == 0: break UpperCAmelCase , UpperCAmelCase : List[str] = divisor, remainder UpperCAmelCase , UpperCAmelCase : Dict = numerator / divisor, denominator / divisor return int(__magic_name__ ), int(__magic_name__ ) if __name__ == "__main__": print(F'{decimal_to_fraction(2) = }') print(F'{decimal_to_fraction(8_9.0) = }') print(F'{decimal_to_fraction("67") = }') print(F'{decimal_to_fraction("45.0") = }') print(F'{decimal_to_fraction(1.5) = }') print(F'{decimal_to_fraction("6.25") = }') print(F'{decimal_to_fraction("78td") = }')
679
'''simple docstring''' import os def lowercase ( ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = os.path.dirname(os.path.realpath(__magic_name__ ) ) UpperCAmelCase : Any = os.path.join(__magic_name__ , "triangle.txt" ) with open(__magic_name__ ) as f: UpperCAmelCase : str = f.readlines() UpperCAmelCase : Optional[int] = [] for line in triangle: UpperCAmelCase : List[str] = [] for number in line.strip().split(" " ): numbers_from_line.append(int(__magic_name__ ) ) a.append(__magic_name__ ) for i in range(1 , len(__magic_name__ ) ): for j in range(len(a[i] ) ): UpperCAmelCase : Union[str, Any] = a[i - 1][j] if j != len(a[i - 1] ) else 0 UpperCAmelCase : List[str] = a[i - 1][j - 1] if j > 0 else 0 a[i][j] += max(__magic_name__ , __magic_name__ ) return max(a[-1] ) if __name__ == "__main__": print(solution())
679
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available a : List[str] = { "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"], "tokenization_m2m_100": ["M2M100Tokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : int = [ "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST", "M2M100ForConditionalGeneration", "M2M100Model", "M2M100PreTrainedModel", ] if TYPE_CHECKING: from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig from .tokenization_mam_aaa import MaMaaaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mam_aaa import ( M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST, MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaPreTrainedModel, ) else: import sys a : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
679
'''simple docstring''' def lowercase ( __magic_name__ ): '''simple docstring''' if n == 1 or not isinstance(__magic_name__ , __magic_name__ ): return 0 elif n == 2: return 1 else: UpperCAmelCase : Optional[int] = [0, 1] for i in range(2 , n + 1 ): sequence.append(sequence[i - 1] + sequence[i - 2] ) return sequence[n] def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : Optional[int] = 0 UpperCAmelCase : Union[str, Any] = 2 while digits < n: index += 1 UpperCAmelCase : Any = len(str(fibonacci(__magic_name__ ) ) ) return index def lowercase ( __magic_name__ = 1000 ): '''simple docstring''' return fibonacci_digits_index(__magic_name__ ) if __name__ == "__main__": print(solution(int(str(input()).strip())))
679
1
"""Unit tests for MobileNetV1's image processor (resize + center-crop pipeline)."""
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MobileNetVaImageProcessor


class MobileNetVaImageProcessingTester(unittest.TestCase):
    """Builds the reference image-processor config and sizing parameters used by the tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class MobileNetVaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        # Intentionally a no-op, mirroring the common-test hook it overrides.
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
679
"""Convert an original RWKV checkpoint from the Hub into the Hugging Face `Rwkv` format."""
import argparse
import gc
import json
import os
import re

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint


# Known model sizes -> architecture hyper-parameters.
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}


def convert_state_dict(state_dict):
    """Rename original RWKV parameter names to the transformers `Rwkv` naming scheme (in place)."""
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        # Everything except the (untied) head lives under the `rwkv` backbone prefix.
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict


def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    """Download `checkpoint_file` from `repo_id`, convert and shard it into `output_dir`.

    Args:
        repo_id: Hub repo holding the original checkpoint.
        checkpoint_file: name of the checkpoint file in that repo.
        output_dir: destination directory for the converted model + tokenizer.
        size: one of NUM_HIDDEN_LAYERS_MAPPING's keys; inferred from the filename if None.
        tokenizer_file: optional tokenizer.json; falls back to the GPT-NeoX-20B tokenizer.
        push_to_hub: when True, upload the converted model as `model_name`.
        model_name: Hub id to push to (required when push_to_hub is True).
    """
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
    print(
        "Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
    )
    parser.add_argument(
        "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
    )
    parser.add_argument(
        "--tokenizer_file",
        default=None,
        type=str,
        help="Path to the tokenizer file to use (if not provided, only the model is converted).",
    )
    parser.add_argument(
        "--size",
        default=None,
        type=str,
        help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Push to the Hub the converted model.",
    )
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="Name of the pushed model on the Hub, including the username / organization.",
    )

    args = parser.parse_args()
    convert_rmkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
679
1
"""Project Euler 43: sum of all 0-9 pandigital numbers with the substring-divisibility property."""
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    """Return True if the digit tuple ``num`` satisfies Euler 43's property.

    ``num`` is a permutation of the digits 0-9; with digits d1..d10, requires
    d2d3d4 % 2 == 0, d3d4d5 % 3 == 0, d4d5d6 % 5 == 0 and the remaining
    three-digit substrings divisible by 7, 11, 13, 17 respectively.
    """
    # Cheap single-digit checks first: d4 even, digit-sum rule for 3, d6 in {0, 5}.
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    # Remaining substrings d5d6d7 .. d8d9d10 against 7, 11, 13, 17.
    # (The original enumerated the wrong sequence and used undefined names.)
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Return the sum of all pandigital numbers over digits 0..n-1 that pass
    :func:`is_substring_divisible`.

    Note: the divisibility checks index up to position 10, so only n == 10 is
    meaningful; the parameter is kept for interface compatibility.
    """
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


# Backward-compatible alias for the old (obfuscated) public name.
lowercase = solution

if __name__ == "__main__":
    print(f"{solution() = }")
679
"""Bitwise AND of two non-negative integers, rendered as a binary string."""


def lowercase(a: int, b: int) -> str:
    """Return the bitwise AND of ``a`` and ``b`` as a '0b'-prefixed binary string.

    The result is zero-padded to the width of the longer operand's binary form.

    Raises:
        ValueError: if either input is negative.
    """
    # The original signature declared the same placeholder name for both
    # parameters (a SyntaxError); the body already used `a` and `b`.
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
679
1
'''simple docstring'''
from ... import PretrainedConfig

# Checkpoint name -> config URL. The class attribute below references this map,
# so it must be bound to this name (the original bound it only to `a`, leaving
# `NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP` undefined -> NameError at import).
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}
a = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP  # old module-level alias, kept for backward compatibility


class UpperCamelCase__(PretrainedConfig):
    """Configuration class for the NEZHA model.

    Stores the hyper-parameters used to instantiate a NEZHA model; defaults
    match the ``sijunhe/nezha-cn-base`` checkpoint. Serialization behaviour is
    inherited from ``PretrainedConfig`` (the original inherited an undefined
    placeholder name ``lowercase__``).
    """

    # Original assigned the archive map and the model type to the same mangled
    # attribute (the second assignment won); both are exposed here.
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    SCREAMING_SNAKE_CASE__ = "nezha"  # old attribute name, kept for compatibility
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,  # NEZHA-specific: span of relative position encodings
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        # The original signature reused one mangled name for every parameter
        # (a SyntaxError); names are reconstructed from the attribute
        # assignments in the body — confirm against upstream if possible.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
679
'''simple docstring''' import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): a : Optional[Any] = "pt" elif is_tf_available(): a : List[Any] = "tf" else: a : List[Any] = "jax" class UpperCamelCase__ ( lowercase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : int = PerceiverTokenizer SCREAMING_SNAKE_CASE__ : List[str] = False def A_ ( self ): '''simple docstring''' super().setUp() UpperCAmelCase : List[str] = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def A_ ( self ): '''simple docstring''' return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver" ) def A_ ( self , **snake_case ): '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case ) def A_ ( self , snake_case , snake_case=False , snake_case=2_0 , snake_case=5 ): '''simple docstring''' UpperCAmelCase : Optional[Any] = [] for i in range(len(snake_case ) ): try: UpperCAmelCase : int = tokenizer.decode([i] , clean_up_tokenization_spaces=snake_case ) except UnicodeDecodeError: pass toks.append((i, tok) ) UpperCAmelCase : Optional[int] = list(filter(lambda snake_case : re.match(r"^[ a-zA-Z]+$" , t[1] ) , snake_case ) ) UpperCAmelCase : Any = list(filter(lambda snake_case : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=snake_case ) , snake_case ) ) if max_length is not None and len(snake_case ) > max_length: UpperCAmelCase : Optional[Any] = toks[:max_length] if min_length is not None and len(snake_case ) < min_length and len(snake_case ) > 0: while len(snake_case ) < min_length: UpperCAmelCase : Any = toks + toks # toks_str = [t[1] for t in toks] UpperCAmelCase : Dict = [t[0] for t in toks] # Ensure 
consistency UpperCAmelCase : Any = tokenizer.decode(snake_case , clean_up_tokenization_spaces=snake_case ) if " " not in output_txt and len(snake_case ) > 1: UpperCAmelCase : Dict = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=snake_case ) + " " + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=snake_case ) ) if with_prefix_space: UpperCAmelCase : Union[str, Any] = " " + output_txt UpperCAmelCase : Dict = tokenizer.encode(snake_case , add_special_tokens=snake_case ) return output_txt, output_ids def A_ ( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = self.perceiver_tokenizer UpperCAmelCase : Tuple = "Unicode €." UpperCAmelCase : int = tokenizer(snake_case ) UpperCAmelCase : Tuple = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5] self.assertEqual(encoded["input_ids"] , snake_case ) # decoding UpperCAmelCase : Optional[Any] = tokenizer.decode(snake_case ) self.assertEqual(snake_case , "[CLS]Unicode €.[SEP]" ) UpperCAmelCase : Tuple = tokenizer("e è é ê ë" ) UpperCAmelCase : str = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5] self.assertEqual(encoded["input_ids"] , snake_case ) # decoding UpperCAmelCase : Dict = tokenizer.decode(snake_case ) self.assertEqual(snake_case , "[CLS]e è é ê ë[SEP]" ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "[CLS]e è é ê ë[SEP]" ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : int = self.perceiver_tokenizer UpperCAmelCase : Tuple = ["A long paragraph for summarization.", "Another paragraph for summarization."] # fmt: off UpperCAmelCase : List[str] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0] # fmt: on UpperCAmelCase : 
Dict = tokenizer(snake_case , padding=snake_case , return_tensors=snake_case ) self.assertIsInstance(snake_case , snake_case ) if FRAMEWORK != "jax": UpperCAmelCase : List[Any] = list(batch.input_ids.numpy()[0] ) else: UpperCAmelCase : str = list(batch.input_ids.tolist()[0] ) self.assertListEqual(snake_case , snake_case ) self.assertEqual((2, 3_8) , batch.input_ids.shape ) self.assertEqual((2, 3_8) , batch.attention_mask.shape ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = self.perceiver_tokenizer UpperCAmelCase : Tuple = ["A long paragraph for summarization.", "Another paragraph for summarization."] UpperCAmelCase : List[Any] = tokenizer(snake_case , padding=snake_case , return_tensors=snake_case ) # check if input_ids are returned and no decoder_input_ids self.assertIn("input_ids" , snake_case ) self.assertIn("attention_mask" , snake_case ) self.assertNotIn("decoder_input_ids" , snake_case ) self.assertNotIn("decoder_attention_mask" , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = self.perceiver_tokenizer UpperCAmelCase : int = [ "Summary of the text.", "Another summary.", ] UpperCAmelCase : List[Any] = tokenizer( text_target=snake_case , max_length=3_2 , padding="max_length" , truncation=snake_case , return_tensors=snake_case ) self.assertEqual(3_2 , targets["input_ids"].shape[1] ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): self.assertNotEqual(tokenizer.model_max_length , 4_2 ) # Now let's start the test UpperCAmelCase : Tuple = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc UpperCAmelCase : Dict = tempfile.mkdtemp() UpperCAmelCase : Any = " He is very happy, UNwant\u00E9d,running" UpperCAmelCase : int = tokenizer.encode(snake_case , 
add_special_tokens=snake_case ) tokenizer.save_pretrained(snake_case ) UpperCAmelCase : List[str] = tokenizer.__class__.from_pretrained(snake_case ) UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(snake_case , add_special_tokens=snake_case ) self.assertListEqual(snake_case , snake_case ) shutil.rmtree(snake_case ) UpperCAmelCase : Dict = self.get_tokenizers(model_max_length=4_2 ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc UpperCAmelCase : str = tempfile.mkdtemp() UpperCAmelCase : int = " He is very happy, UNwant\u00E9d,running" tokenizer.add_tokens(["bim", "bambam"] ) UpperCAmelCase : int = tokenizer.additional_special_tokens additional_special_tokens.append("new_additional_special_token" ) tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} ) UpperCAmelCase : List[str] = tokenizer.encode(snake_case , add_special_tokens=snake_case ) tokenizer.save_pretrained(snake_case ) UpperCAmelCase : Optional[Any] = tokenizer.__class__.from_pretrained(snake_case ) UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(snake_case , add_special_tokens=snake_case ) self.assertListEqual(snake_case , snake_case ) self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 4_2 ) UpperCAmelCase : Optional[int] = tokenizer.__class__.from_pretrained(snake_case , model_max_length=4_3 ) self.assertEqual(tokenizer.model_max_length , 4_3 ) shutil.rmtree(snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: 
tokenizer_utils.save_pretrained(snake_case ) with open(os.path.join(snake_case , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file: UpperCAmelCase : Union[str, Any] = json.load(snake_case ) with open(os.path.join(snake_case , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file: UpperCAmelCase : Any = json.load(snake_case ) UpperCAmelCase : str = [f"<extra_id_{i}>" for i in range(1_2_5 )] UpperCAmelCase : List[Any] = added_tokens_extra_ids + [ "an_additional_special_token" ] UpperCAmelCase : List[str] = added_tokens_extra_ids + [ "an_additional_special_token" ] with open(os.path.join(snake_case , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile: json.dump(snake_case , snake_case ) with open(os.path.join(snake_case , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile: json.dump(snake_case , snake_case ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files UpperCAmelCase : Optional[Any] = tokenizer_class.from_pretrained( snake_case , ) self.assertIn( "an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained UpperCAmelCase : Optional[int] = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=snake_case )] UpperCAmelCase : Optional[int] = tokenizer_class.from_pretrained( snake_case , additional_special_tokens=snake_case , ) self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens ) self.assertEqual( ["a_new_additional_special_token"] , 
tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : int = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([1_7_8] ) , "�" ) def A_ ( self ): '''simple docstring''' pass def A_ ( self ): '''simple docstring''' pass def A_ ( self ): '''simple docstring''' pass def A_ ( self ): '''simple docstring''' pass def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = self.get_tokenizers(fast=snake_case , do_lower_case=snake_case ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): UpperCAmelCase : List[Any] = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"] UpperCAmelCase : int = tokenizer.convert_tokens_to_string(snake_case ) self.assertIsInstance(snake_case , snake_case )
679
1
"""Project Euler 94: sum of perimeters of almost-equilateral triangles
(sides a, a, a±1) with integral side lengths and integral area, whose
perimeter does not exceed the given bound.
"""


def solution(max_perimeter: int = 10**9) -> int:
    """Return the sum of qualifying perimeters not exceeding ``max_perimeter``.

    Uses the known recurrence generating the side lengths of successive
    almost-equilateral Heronian triangles (perimeters 16, 50, 196, 722, ...),
    so the loop runs O(log max_perimeter) iterations.
    """
    # The original defined this as the obfuscated name `lowercase` while the
    # __main__ guard called `solution` -> NameError.
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter  # first pass adds the seed value 0
        prev_value += 2 * value
        value += prev_value
        # Alternate the ±2 correction: sides (a, a, a+1) vs (a, a, a-1).
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


# Backward-compatible alias for the old (obfuscated) public name.
lowercase = solution

if __name__ == "__main__":
    print(f"{solution() = }")
679
'''simple docstring'''
from typing import List

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

# Checkpoint name -> config URL. The original bound both the logger and this
# map to the same module-level name `a`, silently clobbering the logger.
EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}
a = EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP  # old alias, kept for backward compatibility


class UpperCamelCase__(PretrainedConfig):
    """Configuration class for the EfficientFormer model.

    Defaults match the ``snap-research/efficientformer-l1-300`` checkpoint.
    Inherits from ``PretrainedConfig`` (the original inherited an undefined
    placeholder name ``lowercase__``).
    """

    SCREAMING_SNAKE_CASE__ = "efficientformer"  # old attribute name, kept for compatibility
    model_type = "efficientformer"

    def __init__(
        self,
        # NOTE: list defaults kept from the original for interface
        # compatibility; treat them as read-only (shared mutable defaults).
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_metaad_blocks: int = 1,  # name kept from original body; presumably "meta3d" upstream — TODO confirm
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ):
        # The original signature reused one mangled name for every parameter
        # (a SyntaxError); names are reconstructed from the body's attribute
        # assignment order — confirm against upstream if possible.
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_metaad_blocks = num_metaad_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
679
1
'''simple docstring'''
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

# Checkpoint name -> config URL. The original bound both the logger and this
# map to the same module-level name `a`, silently clobbering the logger.
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
a = WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP  # old alias, kept for backward compatibility


class UpperCamelCase__(PretrainedConfig):
    """Configuration class for the Wav2Vec2 model.

    Defaults match the ``facebook/wav2vec2-base-960h`` checkpoint. Inherits
    from ``PretrainedConfig`` (the original inherited an undefined placeholder
    name ``lowercase__``).
    """

    SCREAMING_SNAKE_CASE__ = "wav2vec2"  # old attribute name, kept for compatibility
    model_type = "wav2vec2"

    def __init__(
        self,
        # The original signature reused one mangled name for every parameter
        # (a SyntaxError); names are reconstructed from the body's attribute
        # assignment order and defaults — confirm against upstream if possible.
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        # Copy the conv specs so callers' sequences are never aliased/mutated.
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # All three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel )}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        # Product of all conv strides = total downsampling factor of the
        # feature extractor (raw audio samples per output frame).
        return functools.reduce(operator.mul, self.conv_stride, 1)

    # Backward-compatible alias for the old (obfuscated) property name.
    A_ = inputs_to_logits_ratio
679
'''simple docstring''' from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case , snake_case=3 , snake_case=3_2 , snake_case=3 , snake_case=1_0 , snake_case=[1_0, 2_0, 3_0, 4_0] , snake_case=[1, 1, 2, 1] , snake_case=True , snake_case=True , snake_case="relu" , snake_case=3 , snake_case=None , ): '''simple docstring''' UpperCAmelCase : Dict = parent UpperCAmelCase : int = batch_size UpperCAmelCase : Union[str, Any] = image_size UpperCAmelCase : Union[str, Any] = num_channels UpperCAmelCase : List[str] = embeddings_size UpperCAmelCase : Any = hidden_sizes UpperCAmelCase : int = depths UpperCAmelCase : List[str] = is_training UpperCAmelCase : List[str] = use_labels UpperCAmelCase : int = hidden_act UpperCAmelCase : Union[str, Any] = num_labels UpperCAmelCase : str = scope UpperCAmelCase : str = len(snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase : List[Any] = None if self.use_labels: UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_labels ) UpperCAmelCase : Optional[int] = self.get_config() return config, pixel_values, labels def 
A_ ( self ): '''simple docstring''' return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def A_ ( self , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : List[Any] = TFResNetModel(config=snake_case ) UpperCAmelCase : int = model(snake_case ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def A_ ( self , snake_case , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : List[str] = self.num_labels UpperCAmelCase : List[Any] = TFResNetForImageClassification(snake_case ) UpperCAmelCase : Union[str, Any] = model(snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = config_and_inputs UpperCAmelCase : Union[str, Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class UpperCamelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () SCREAMING_SNAKE_CASE__ : Optional[int] = ( {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification} if is_tf_available() else {} ) SCREAMING_SNAKE_CASE__ : Dict = False SCREAMING_SNAKE_CASE__ : int = False SCREAMING_SNAKE_CASE__ : Tuple = False SCREAMING_SNAKE_CASE__ : Optional[Any] = False SCREAMING_SNAKE_CASE__ : Union[str, Any] = False def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = TFResNetModelTester(self ) 
UpperCAmelCase : List[Any] = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case ) def A_ ( self ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A_ ( self ): '''simple docstring''' return @unittest.skip(reason="ResNet does not use inputs_embeds" ) def A_ ( self ): '''simple docstring''' pass @unittest.skip(reason="ResNet does not support input and output embeddings" ) def A_ ( self ): '''simple docstring''' pass def A_ ( self ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : Dict = model_class(snake_case ) UpperCAmelCase : Optional[int] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase : List[str] = [*signature.parameters.keys()] UpperCAmelCase : Tuple = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def A_ ( self ): '''simple docstring''' def check_hidden_states_output(snake_case , snake_case , snake_case ): UpperCAmelCase : Optional[Any] = model_class(snake_case ) UpperCAmelCase : Union[str, Any] = model(**self._prepare_for_class(snake_case , snake_case ) ) UpperCAmelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCAmelCase : List[str] = self.model_tester.num_stages self.assertEqual(len(snake_case ) , 
expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Optional[int] = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: UpperCAmelCase : str = layer_type UpperCAmelCase : Optional[Any] = True check_hidden_states_output(snake_case , snake_case , snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase : str = True check_hidden_states_output(snake_case , snake_case , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case ) @slow def A_ ( self ): '''simple docstring''' for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : Any = TFResNetModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def lowercase ( ): '''simple docstring''' UpperCAmelCase : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @cached_property def A_ ( self ): '''simple docstring''' return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) UpperCAmelCase : Union[str, Any] = self.default_image_processor UpperCAmelCase : Tuple = prepare_img() UpperCAmelCase : str = image_processor(images=snake_case , return_tensors="tf" ) # forward pass 
UpperCAmelCase : Any = model(**snake_case ) # verify the logits UpperCAmelCase : Any = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , snake_case ) UpperCAmelCase : List[str] = tf.constant([-11.1069, -9.7877, -8.3777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , snake_case , atol=1e-4 ) )
679
1
'''simple docstring''' from __future__ import annotations import string from itertools import cycle, product from pathlib import Path a : str = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) a : list[int] = [ord(letter) for letter in string.ascii_lowercase] a : set[int] = {ord(char) for char in VALID_CHARS} a : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"] def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' UpperCAmelCase : str = "" UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : int for keychar, cipherchar in zip(cycle(__magic_name__ ) , __magic_name__ ): UpperCAmelCase : Dict = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(__magic_name__ ) return decoded def lowercase ( __magic_name__ ): '''simple docstring''' UpperCAmelCase : list[str] = [] for key in product(__magic_name__ , repeat=3 ): UpperCAmelCase : int = try_key(__magic_name__ , __magic_name__ ) if encoded is not None: possibles.append(__magic_name__ ) return possibles def lowercase ( __magic_name__ , __magic_name__ ): '''simple docstring''' return [possible for possible in possibles if common_word in possible.lower()] def lowercase ( __magic_name__ = "p059_cipher.txt" ): '''simple docstring''' UpperCAmelCase : list[int] UpperCAmelCase : list[str] UpperCAmelCase : str UpperCAmelCase : str UpperCAmelCase : str = Path(__magic_name__ ).parent.joinpath(__magic_name__ ).read_text(encoding="utf-8" ) UpperCAmelCase : str = [int(__magic_name__ ) for number in data.strip().split("," )] UpperCAmelCase : List[str] = filter_valid_chars(__magic_name__ ) for common_word in COMMON_WORDS: UpperCAmelCase : Any = filter_common_word(__magic_name__ , __magic_name__ ) if len(__magic_name__ ) == 1: break UpperCAmelCase : List[str] = possibles[0] return sum(ord(__magic_name__ ) for char in decoded_text ) if __name__ == "__main__": print(F'{solution() = }')
679
"""Tests for the PyTorch MPNet model and its task heads.

Reconstructed from an obfuscated dump: method names were collapsed to a
single shadowing identifier (`A_`) and `self.<attr> = value` assignments
were destroyed.  Real names are restored from the preserved call sites
(`self.model_tester.prepare_config_and_inputs()` etc.).
"""
import unittest

from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MPNetForMaskedLM,
        MPNetForMultipleChoice,
        MPNetForQuestionAnswering,
        MPNetForSequenceClassification,
        MPNetForTokenClassification,
        MPNetModel,
    )


class MPNetModelTester:
    """Builds a tiny random MPNet config/inputs and checks each head's output shapes."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        """Config of the public checkpoint, for tests that need realistic sizes."""
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        """Random ids/mask/labels plus a small config, shared by all checks below."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercised both with and without the attention mask.
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Multiple-choice heads expect (batch, num_choices, seq_len) inputs.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the dump only preserves the values (False/True); these are
    # the conventional ModelTesterMixin flags at these positions — confirm
    # against the upstream test file.
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)


@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
679
1
"""Sigmoid and SiLU (sigmoid linear unit, a.k.a. swish) activations.

The original dump defined both functions under the same name ``lowercase``
(the second definition shadowed the first) and the SiLU body called an
undefined ``sigmoid`` — a guaranteed NameError.  Restored to distinct names.
"""
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid: 1 / (1 + e^-x).

    >>> sigmoid(np.array([0.0]))
    array([0.5])
    """
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Element-wise SiLU / swish: x * sigmoid(x).

    >>> sigmoid_linear_unit(np.array([0.0]))
    array([0.])
    """
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
679
"""Convert slow (Python) tokenizer checkpoints to fast (tokenizers-backed)
ones and keep only the resulting ``tokenizer.json`` files.

The obfuscated dump defined the worker as ``lowercase`` while the CLI entry
point called ``convert_slow_checkpoint_to_fast`` (NameError), and used
annotated tuple unpacking (``a, b : T = ...``), which is a SyntaxError.
Both are fixed by restoring real names.
"""
import argparse
import os

import transformers

from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging


logging.set_verbosity_info()

logger = logging.get_logger(__name__)

# Map "BertTokenizer" -> BertTokenizerFast etc., for every convertible tokenizer.
TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}


def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    """Download the given checkpoints, re-save them as fast tokenizers.

    Args:
        tokenizer_name: slow tokenizer class name (e.g. ``"BertTokenizer"``)
            or ``None`` to convert every class in ``TOKENIZER_CLASSES``.
        checkpoint_name: a single checkpoint id, or ``None`` to convert every
            canonical checkpoint known to the tokenizer class.
        dump_path: output directory for the generated files.
        force_download: re-download checkpoints even if cached.

    Raises:
        ValueError: if ``tokenizer_name`` is given but unknown.
    """
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            # If the checkpoint id appears in the vocab-file map, mirror the
            # remote layout so the prefix does not get duplicated.
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            # Keep only the fast tokenizer.json; drop the slow-format files.
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help=(
            f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
            "download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--checkpoint_name",
        default=None,
        type=str,
        help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
    )
    parser.add_argument(
        "--force_download",
        action="store_true",
        help="Re-download checkpoints.",
    )
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
679
1
"""PyTorch ResNet model (de-obfuscated reconstruction).

The dump garbled class-level identifiers (``Convad`` -> ``Conv2d``,
``ACTaFN`` -> ``ACT2FN``, ``BatchNormad`` -> ``BatchNorm2d``) and renamed
every method to the same identifier ``A_`` — so each class would have kept
only its last method.  Names are restored from the preserved call sites.
"""
from typing import Optional

import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BackboneOutput,
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]


class ResNetConvLayer(nn.Module):
    """Convolution -> batch norm -> (optional) activation."""

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, activation="relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input):
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetEmbeddings(nn.Module):
    """ResNet stem: one aggressive 7x7/stride-2 convolution plus max-pooling."""

    def __init__(self, config):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding


class ResNetShortCut(nn.Module):
    """1x1 projection used to match residual dimensions when they differ."""

    def __init__(self, in_channels, out_channels, stride=2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input):
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class ResNetBasicLayer(nn.Module):
    """Classic residual layer: two 3x3 convolutions plus a skip connection."""

    def __init__(self, in_channels, out_channels, stride=1, activation="relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            # No activation on the last conv: it is applied after the residual add.
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetBottleNeckLayer(nn.Module):
    """Bottleneck residual layer: 1x1 reduce -> 3x3 -> 1x1 expand, plus skip."""

    def __init__(self, in_channels, out_channels, stride=1, activation="relu", reduction=4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetStage(nn.Module):
    """A stack of `depth` residual layers at the same output width."""

    def __init__(self, config, in_channels, out_channels, stride=2, depth=2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input):
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class ResNetEncoder(nn.Module):
    """All ResNet stages, optionally collecting intermediate hidden states."""

    def __init__(self, config):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state, output_hidden_states=False, return_dict=True):
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )


class ResNetPreTrainedModel(PreTrainedModel):
    """Handles weight initialization and the pretrained-model download interface."""

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value


RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values, output_hidden_states=None, return_dict=None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None):
        """
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in
            `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed
            (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # Infer the problem type once from labels/config, then pick the loss.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)


@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values, output_hidden_states=None, return_dict=None):
        """
        Returns:
            The feature maps of the requested stages.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        # Always collect hidden states internally so the requested stages can
        # be selected below, regardless of the caller's flags.
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
679
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Visual-question-answering tool built on a ViLT checkpoint.

De-obfuscated: the dump assigned every class attribute to the same shadowing
name (losing the tool configuration), named all three pipeline methods `A_`
(only the last would survive), and garbled `id2label` into `idalabel`.
"""
from typing import TYPE_CHECKING

import torch

from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # PIL is required to handle the input image.
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        """Turn the raw image/question pair into model-ready tensors."""
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        """Run the VQA model; inference only, so gradients are disabled."""
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        """Map the highest-scoring logit back to its answer string."""
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
679
1
#!/usr/bin/env python
"""Fine-tune a multiple-choice model (e.g. BERT) on the SWAG dataset.

Reconstructed from an obfuscated dump: the original identifiers
(``UpperCAmelCase``, ``lowercase``, ``snake_case`` ...) were undefined or
duplicated and the file could not run. Names below follow the upstream
HuggingFace example; mangled tokens (``fpaa``, ``intaa``, ``floataa``)
are restored to ``fp16`` / ``int64`` / ``float32``.
"""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union

import datasets
import numpy as np
import torch
from datasets import load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        # Fail fast on unsupported data formats instead of erroring deep inside `load_dataset`.
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."


@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that dynamically pads the inputs for multiple choice.

    Each feature holds ``num_choices`` candidate encodings; they are flattened,
    padded together as one batch, then reshaped back to
    ``(batch_size, num_choices, seq_len)``.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten back to (batch_size, num_choices, -1)
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels as int64, as expected by CrossEntropyLoss
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch


def main():
    # ---- Argument parsing ----------------------------------------------
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources
    # to maintain them. The information sent is the one passed as arguments along with
    # your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # ---- Logging --------------------------------------------------------
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level
        # at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # ---- Checkpoint detection -------------------------------------------
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # ---- Datasets -------------------------------------------------------
    # Either user-provided CSV/JSON files or the public "swag" dataset from the hub.
    # In distributed training, load_dataset guarantees only one local process downloads.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )

    # ---- Model & tokenizer ----------------------------------------------
    # from_pretrained guarantees only one local process downloads model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will
    # probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # ---- Preprocessing --------------------------------------------------
    def preprocess_function(examples):
        # Repeat each context once per candidate ending, pair it with
        # "question-header + ending", tokenize flat, then regroup in fours.
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # ---- Training -------------------------------------------------------
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # ---- Evaluation -----------------------------------------------------
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs): index is the process ordinal, unused here.
    main()


if __name__ == "__main__":
    main()
679
"""PyTorch -> Flax checkpoint conversion utilities.

Reconstructed from an obfuscated dump: the original had duplicate
``__magic_name__`` parameters (a SyntaxError), two sibling functions both
named ``lowercase``, and locals read under names that were never bound.
Names below follow the diffusers source.
"""
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging

logger = logging.get_logger(__name__)


def rename_key(key):
    """Replace every ``name.<digits>`` segment in *key* by ``name_<digits>``
    (PyTorch indexes submodules with dots; Flax uses underscores)."""
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename a PyTorch weight key (tuple form) to its Flax equivalent and
    reshape/transpose the tensor accordingly.

    Handles, in order: norm scale, embedding, conv kernel (OIHW -> HWIO),
    linear kernel (transposed), and old-style ``gamma``/``beta`` norm names.
    Returns the (possibly renamed) key tuple and the (possibly transformed)
    tensor; unknown keys pass through unchanged.
    """
    # norm layer: PyTorch "bias"-named norm weight that Flax calls "scale"
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: PyTorch stores OIHW, Flax expects HWIO
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: Flax stores the transposed weight matrix
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    """Convert a PyTorch ``state_dict`` into a nested Flax parameter dict.

    ``init_key`` seeds the random Flax initialisation used only to discover
    the expected parameter names/shapes. Raises ``ValueError`` when a
    converted tensor's shape disagrees with the Flax parameter of the same name.
    """
    # Step 1: Convert PyTorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weights so that a warning is thrown downstream
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
679
1
"""InstructBLIP model configuration.

Reconstructed from an obfuscated dump (undefined base class ``lowercase__``,
constructor arguments never bound); names follow the transformers source.
"""
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING

logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}


class InstructBlipVisionConfig(PretrainedConfig):
    """Configuration of the InstructBLIP vision encoder (ViT-g defaults)."""

    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class InstructBlipQFormerConfig(PretrainedConfig):
    """Configuration of the InstructBLIP Q-Former (BERT-base-like defaults)."""

    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # How often (in layers) the Q-Former cross-attends to the image features.
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class InstructBlipConfig(PretrainedConfig):
    """Composite configuration: vision encoder + Q-Former + language model."""

    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        # The Q-Former cross-attends to the vision encoder's hidden states.
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: InstructBlipVisionConfig,
        qformer_config: InstructBlipQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        """Instantiate an ``InstructBlipConfig`` from its three sub-configs."""
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        """Serialize to a plain dict, recursing into the three sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
679
"""Unit tests for ``EulerDiscreteScheduler``.

Reconstructed from an obfuscated dump (undefined base class and locals,
mangled numeric literals); names follow the diffusers test suite. The
expected sums/means are regression values for seed 0.
"""
import torch

from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        # beta_start / beta_end are varied in lockstep.
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        # Same as test_full_loop_no_noise, but timesteps are placed on `torch_device`.
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
679
1
"""Repository hygiene checks for dataset scripts.

Reconstructed from an obfuscated dump (undefined base class and locals).
Scans every ``*.py`` file under ``./datasets`` and asserts that:
1) every ``open(...)`` call passes an explicit encoding or a binary mode, and
2) no stray ``print(...)`` statements remain (use the datasets logger instead).
"""
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class TestDatasetScriptsHygiene(TestCase):
    def _no_encoding_on_file_open(self, file_path: str):
        r"""Return a regex match if *file_path* contains an ``open(...)`` call
        without an explicit encoding / write-or-binary mode, else ``None``."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_file_content = input_file.read()
            match = regexp.search(input_file_content)
        return match

    def _no_print_statements(self, file_path: str):
        r"""Return a regex match for a real ``print(`` call in *file_path*,
        ignoring prints inside comments, string literals and docstrings."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_file_content = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_file_content)
        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
679
# NOTE(review): this block is a byte-for-byte duplicate of the hygiene test
# class defined earlier in this dump; redefinition simply shadows the first.
# Reconstructed identically (undefined base class and locals in the dump).
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class TestDatasetScriptsHygiene(TestCase):
    """Hygiene checks: utf-8 ``open(...)`` calls and no stray ``print``s
    in the dataset scripts under ``./datasets``."""

    def _no_encoding_on_file_open(self, file_path: str):
        r"""Return a regex match if *file_path* contains an ``open(...)`` call
        without an explicit encoding / write-or-binary mode, else ``None``."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_file_content = input_file.read()
            match = regexp.search(input_file_content)
        return match

    def _no_print_statements(self, file_path: str):
        r"""Return a regex match for a real ``print(`` call in *file_path*,
        ignoring prints inside comments, string literals and docstrings."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_file_content = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_file_content)
        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
679
1
'''simple docstring'''

import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyVaaControlnetImgaImgPipeline,
    KandinskyVaaPriorEmbaEmbPipeline,
    UNetaDConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


# NOTE(review): this file appears machine-mangled: every method is named
# ``A_`` (later defs shadow earlier ones), every local is assigned to
# ``UpperCAmelCase`` while later statements reference the original names
# (``model``, ``unet``, ``pipe`` ... -- undefined here), one signature repeats
# the parameter name ``snake_case`` (a SyntaxError), class names look altered
# (e.g. ``KandinskyVaaControlnetImgaImgPipeline`` for
# ``KandinskyV22ControlnetImg2ImgPipeline``) and the base class ``lowercase__``
# is never defined. Comments below describe the apparent intent; confirm
# against the upstream diffusers test module.

# Make torch ops deterministic so the frozen pixel-slice assertions are stable.
enable_full_determinism()


class UpperCamelCase__ ( lowercase__ , unittest.TestCase ):
    """simple docstring"""

    # Pipeline under test plus the argument names the shared tester mixin
    # exercises.  NOTE(review): all four attributes share one (mangled) name,
    # so only the last assignment survives.
    SCREAMING_SNAKE_CASE__ : str = KandinskyVaaControlnetImgaImgPipeline
    SCREAMING_SNAKE_CASE__ : Optional[Any] = ["image_embeds", "negative_image_embeds", "image", "hint"]
    SCREAMING_SNAKE_CASE__ : int = ["image_embeds", "negative_image_embeds", "image", "hint"]
    SCREAMING_SNAKE_CASE__ : Dict = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    SCREAMING_SNAKE_CASE__ : List[Any] = False

    @property
    def A_ ( self ):
        '''simple docstring'''
        # Base channel width for the tiny test UNet (mangled name, presumably
        # ``block_out_channels_a`` -- TODO confirm).
        return 3_2

    @property
    def A_ ( self ):
        '''simple docstring'''
        return 3_2

    @property
    def A_ ( self ):
        '''simple docstring'''
        return self.time_input_dim

    @property
    def A_ ( self ):
        '''simple docstring'''
        return self.time_input_dim * 4

    @property
    def A_ ( self ):
        '''simple docstring'''
        return 1_0_0

    @property
    def A_ ( self ):
        '''simple docstring'''
        # Tiny seeded UNet so the fast test runs on CPU in seconds.
        torch.manual_seed(0 )
        UpperCAmelCase : Dict = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        UpperCAmelCase : Optional[Any] = UNetaDConditionModel(**snake_case )
        return model

    @property
    def A_ ( self ):
        '''simple docstring'''
        # Tiny VQ autoencoder ("movq") config used by the Kandinsky decoder.
        return {
            "block_out_channels": [3_2, 3_2, 6_4, 6_4],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 1_2,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def A_ ( self ):
        '''simple docstring'''
        # Seeded movq instance built from the kwargs property above.
        torch.manual_seed(0 )
        UpperCAmelCase : Optional[int] = VQModel(**self.dummy_movq_kwargs )
        return model

    def A_ ( self ):
        '''simple docstring'''
        # Assemble unet + DDIM scheduler + movq into the pipeline kwargs dict.
        UpperCAmelCase : Dict = self.dummy_unet
        UpperCAmelCase : Tuple = self.dummy_movq
        UpperCAmelCase : str = {
            "num_train_timesteps": 1_0_0_0,
            "beta_schedule": "linear",
            "beta_start": 0.0_0085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        UpperCAmelCase : Tuple = DDIMScheduler(**snake_case )
        UpperCAmelCase : str = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    # NOTE(review): the repeated parameter name below is a SyntaxError; the
    # second parameter was presumably ``seed=0`` (it is read as ``seed + 1``).
    def A_ ( self , snake_case , snake_case=0 ):
        '''simple docstring'''
        # Build deterministic dummy inputs (embeddings, init image, hint,
        # generator) for the img2img controlnet pipeline.
        UpperCAmelCase : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case ) ).to(snake_case )
        UpperCAmelCase : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            snake_case )
        # create init_image
        UpperCAmelCase : str = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(snake_case ) ).to(snake_case )
        UpperCAmelCase : Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase : Tuple = Image.fromarray(np.uinta(snake_case ) ).convert("RGB" ).resize((2_5_6, 2_5_6) )
        # create hint
        UpperCAmelCase : Optional[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(snake_case ) ).to(snake_case )
        if str(snake_case ).startswith("mps" ):
            UpperCAmelCase : Union[str, Any] = torch.manual_seed(snake_case )
        else:
            UpperCAmelCase : Tuple = torch.Generator(device=snake_case ).manual_seed(snake_case )
        UpperCAmelCase : Optional[int] = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 6_4,
            "width": 6_4,
            "num_inference_steps": 1_0,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def A_ ( self ):
        '''simple docstring'''
        # Fast end-to-end check on CPU: the output image must match a frozen
        # 3x3 pixel slice, and dict vs tuple return paths must agree.
        UpperCAmelCase : Dict = "cpu"
        UpperCAmelCase : List[str] = self.get_dummy_components()
        UpperCAmelCase : Any = self.pipeline_class(**snake_case )
        UpperCAmelCase : List[str] = pipe.to(snake_case )
        pipe.set_progress_bar_config(disable=snake_case )
        UpperCAmelCase : Tuple = pipe(**self.get_dummy_inputs(snake_case ) )
        UpperCAmelCase : Any = output.images
        UpperCAmelCase : List[Any] = pipe(
            **self.get_dummy_inputs(snake_case ) , return_dict=snake_case , )[0]
        UpperCAmelCase : Tuple = image[0, -3:, -3:, -1]
        UpperCAmelCase : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 6_4, 6_4, 3)
        UpperCAmelCase : Dict = np.array(
            [0.5498_5034, 0.5550_9365, 0.5256_1504, 0.557_0494, 0.559_3818, 0.526_3979, 0.5028_5643, 0.506_9846, 0.5119_6736] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
    """simple docstring"""

    def A_ ( self ):
        '''simple docstring'''
        # Free GPU memory between slow integration tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def A_ ( self ):
        '''simple docstring'''
        # Slow GPU integration test: prior + controlnet-depth img2img,
        # compared against a reference array by mean pixel difference.
        UpperCAmelCase : Union[str, Any] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy" )
        UpperCAmelCase : Optional[Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        UpperCAmelCase : List[Any] = init_image.resize((5_1_2, 5_1_2) )
        UpperCAmelCase : List[str] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png" )
        UpperCAmelCase : Dict = torch.from_numpy(np.array(snake_case ) ).float() / 255.0
        UpperCAmelCase : Tuple = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
        UpperCAmelCase : Any = "A robot, 4k photo"
        UpperCAmelCase : List[Any] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
        pipe_prior.to(snake_case )
        UpperCAmelCase : Dict = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa )
        UpperCAmelCase : Any = pipeline.to(snake_case )
        pipeline.set_progress_bar_config(disable=snake_case )
        UpperCAmelCase : Union[str, Any] = torch.Generator(device="cpu" ).manual_seed(0 )
        UpperCAmelCase , UpperCAmelCase : Union[str, Any] = pipe_prior(
            snake_case , image=snake_case , strength=0.85 , generator=snake_case , negative_prompt="" , ).to_tuple()
        UpperCAmelCase : Optional[int] = pipeline(
            image=snake_case , image_embeds=snake_case , negative_image_embeds=snake_case , hint=snake_case , generator=snake_case , num_inference_steps=1_0_0 , height=5_1_2 , width=5_1_2 , strength=0.5 , output_type="np" , )
        UpperCAmelCase : int = output.images[0]
        assert image.shape == (5_1_2, 5_1_2, 3)
        assert_mean_pixel_difference(snake_case , snake_case )
679
'''simple docstring'''

import logging

import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
    BERT_INPUTS_DOCSTRING,
    BERT_START_DOCSTRING,
    BertEncoder,
    BertModel,
    BertPreTrainedModel,
)

# NOTE(review): this file appears machine-mangled: the three classes all share
# the name ``UpperCamelCase__`` (later defs shadow earlier ones, yet the code
# references ``BertEncoderWithPabee`` / ``BertModelWithPabee``), methods are
# all ``A_``, locals are assigned to ``UpperCAmelCase`` while later statements
# read the original names, and several signatures repeat the parameter name
# ``snake_case`` -- a SyntaxError.  Comments describe the apparent upstream
# intent (PABEE: patience-based early exit on BERT); confirm before use.

a : str = logging.getLogger(__name__)


class UpperCamelCase__ ( lowercase__ ):
    """simple docstring"""

    # NOTE(review): the repeated ``snake_case`` parameters below are a
    # SyntaxError; upstream this is
    # ``adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None)``.
    def A_ ( self , snake_case , snake_case , snake_case=None , snake_case=None ):
        '''simple docstring'''
        # Apparent intent: run only the encoder layer at ``current_layer``
        # over the hidden states and return the updated hidden states.
        UpperCAmelCase : Tuple = self.layer[current_layer](snake_case , snake_case , head_mask[current_layer] )
        UpperCAmelCase : Optional[int] = layer_outputs[0]
        return hidden_states


@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , lowercase__ , )
class UpperCamelCase__ ( lowercase__ ):
    """simple docstring"""

    def __init__( self , snake_case ):
        '''simple docstring'''
        super().__init__(snake_case )
        # Replace the stock encoder with the PABEE variant and zero the
        # early-exit bookkeeping (patience / threshold / inference counters --
        # names mangled, all four assignments target one local).
        UpperCAmelCase : Dict = BertEncoderWithPabee(snake_case )
        self.init_weights()
        UpperCAmelCase : int = 0
        UpperCAmelCase : Dict = 0
        UpperCAmelCase : Optional[int] = 0
        UpperCAmelCase : List[Any] = 0

    def A_ ( self , snake_case ):
        '''simple docstring'''
        # Apparent intent: setter for the regression early-exit threshold.
        UpperCAmelCase : List[Any] = threshold

    def A_ ( self , snake_case ):
        '''simple docstring'''
        # Apparent intent: setter for the early-exit patience.
        UpperCAmelCase : str = patience

    def A_ ( self ):
        '''simple docstring'''
        # Apparent intent: reset the inference statistics counters.
        UpperCAmelCase : Dict = 0
        UpperCAmelCase : List[Any] = 0

    def A_ ( self ):
        '''simple docstring'''
        # Apparent intent: log the average number of executed layers and the
        # implied speed-up from early exit.
        UpperCAmelCase : Dict = self.inference_layers_num / self.inference_instances_num
        UpperCAmelCase : List[Any] = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(snake_case )

    # NOTE(review): the repeated ``snake_case`` parameters below are a
    # SyntaxError; upstream these are input_ids, attention_mask,
    # token_type_ids, position_ids, head_mask, inputs_embeds,
    # encoder_hidden_states, encoder_attention_mask, output_dropout,
    # output_layers, regression=False.
    @add_start_docstrings_to_model_forward(snake_case )
    def A_ ( self , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=False , ):
        '''simple docstring'''
        # Apparent intent: BertModel.forward with PABEE -- in training run
        # every layer and collect per-layer classifier outputs; at inference
        # either run all layers (patience == 0) or exit early once the
        # per-layer prediction is stable for ``patience`` consecutive layers.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
        elif input_ids is not None:
            UpperCAmelCase : Dict = input_ids.size()
        elif inputs_embeds is not None:
            UpperCAmelCase : Any = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds" )
        UpperCAmelCase : Optional[int] = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            UpperCAmelCase : Tuple = torch.ones(snake_case , device=snake_case )
        if token_type_ids is None:
            UpperCAmelCase : List[Any] = torch.zeros(snake_case , dtype=torch.long , device=snake_case )
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        UpperCAmelCase : torch.Tensor = self.get_extended_attention_mask(snake_case , snake_case , snake_case )
        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = encoder_hidden_states.size()
            UpperCAmelCase : List[str] = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                UpperCAmelCase : int = torch.ones(snake_case , device=snake_case )
            UpperCAmelCase : str = self.invert_attention_mask(snake_case )
        else:
            UpperCAmelCase : int = None
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        UpperCAmelCase : Dict = self.get_head_mask(snake_case , self.config.num_hidden_layers )
        UpperCAmelCase : Tuple = self.embeddings(
            input_ids=snake_case , position_ids=snake_case , token_type_ids=snake_case , inputs_embeds=snake_case )
        UpperCAmelCase : int = embedding_output
        if self.training:
            # Training: run every layer, applying the matching output head to
            # the pooled state after each one.
            UpperCAmelCase : int = []
            for i in range(self.config.num_hidden_layers ):
                UpperCAmelCase : List[Any] = self.encoder.adaptive_forward(
                    snake_case , current_layer=snake_case , attention_mask=snake_case , head_mask=snake_case )
                UpperCAmelCase : Dict = self.pooler(snake_case )
                UpperCAmelCase : List[Any] = output_layers[i](output_dropout(snake_case ) )
                res.append(snake_case )
        elif self.patience == 0:
            # Use all layers for inference
            UpperCAmelCase : Union[str, Any] = self.encoder(
                snake_case , attention_mask=snake_case , head_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , )
            UpperCAmelCase : Optional[int] = self.pooler(encoder_outputs[0] )
            UpperCAmelCase : List[str] = [output_layers[self.config.num_hidden_layers - 1](snake_case )]
        else:
            # Patience-based early exit: stop once predictions agree for
            # ``patience`` consecutive layers.
            UpperCAmelCase : int = 0
            UpperCAmelCase : Optional[Any] = None
            UpperCAmelCase : Optional[Any] = 0
            for i in range(self.config.num_hidden_layers ):
                calculated_layer_num += 1
                UpperCAmelCase : Tuple = self.encoder.adaptive_forward(
                    snake_case , current_layer=snake_case , attention_mask=snake_case , head_mask=snake_case )
                UpperCAmelCase : Any = self.pooler(snake_case )
                UpperCAmelCase : int = output_layers[i](snake_case )
                if regression:
                    # Regression: stable means within the configured threshold.
                    UpperCAmelCase : Optional[Any] = logits.detach()
                    if patient_result is not None:
                        UpperCAmelCase : Union[str, Any] = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        UpperCAmelCase : Optional[Any] = 0
                else:
                    # Classification: stable means identical argmax labels.
                    UpperCAmelCase : Any = logits.detach().argmax(dim=1 )
                    if patient_result is not None:
                        UpperCAmelCase : Tuple = patient_result.detach().argmax(dim=1 )
                    if (patient_result is not None) and torch.all(labels.eq(snake_case ) ):
                        patient_counter += 1
                    else:
                        UpperCAmelCase : str = 0
                UpperCAmelCase : int = logits
                if patient_counter == self.patience:
                    break
            UpperCAmelCase : int = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1
        return res


@add_start_docstrings(
    "Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , lowercase__ , )
class UpperCamelCase__ ( lowercase__ ):
    """simple docstring"""

    def __init__( self , snake_case ):
        '''simple docstring'''
        super().__init__(snake_case )
        # One linear classifier head per hidden layer so every early-exit
        # point has its own output projection.
        UpperCAmelCase : Union[str, Any] = config.num_labels
        UpperCAmelCase : Optional[Any] = BertModelWithPabee(snake_case )
        UpperCAmelCase : Optional[int] = nn.Dropout(config.hidden_dropout_prob )
        UpperCAmelCase : Any = nn.ModuleList(
            [nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
        self.init_weights()

    # NOTE(review): repeated ``snake_case`` parameters below are a
    # SyntaxError; upstream: input_ids, attention_mask, token_type_ids,
    # position_ids, head_mask, inputs_embeds, labels.
    @add_start_docstrings_to_model_forward(snake_case )
    def A_ ( self , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , ):
        '''simple docstring'''
        # Apparent intent: run the PABEE backbone, then (when labels are
        # given) average the per-layer losses weighted by layer depth.
        UpperCAmelCase : int = self.bert(
            input_ids=snake_case , attention_mask=snake_case , token_type_ids=snake_case , position_ids=snake_case , head_mask=snake_case , inputs_embeds=snake_case , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
        UpperCAmelCase : Tuple = (logits[-1],)
        if labels is not None:
            UpperCAmelCase : Optional[int] = None
            UpperCAmelCase : List[Any] = 0
            for ix, logits_item in enumerate(snake_case ):
                if self.num_labels == 1:
                    # We are doing regression
                    UpperCAmelCase : Dict = MSELoss()
                    UpperCAmelCase : Union[str, Any] = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
                else:
                    UpperCAmelCase : Optional[int] = CrossEntropyLoss()
                    UpperCAmelCase : Tuple = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
                if total_loss is None:
                    UpperCAmelCase : int = loss
                else:
                    # Deeper layers get proportionally larger loss weights.
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            UpperCAmelCase : Tuple = (total_loss / total_weights,) + outputs
        return outputs
679
1
'''simple docstring'''


def lowercase ( __magic_name__ = 10**12 ):
    """Solve the "arranged probability" recurrence (Project Euler 100 family).

    Walks successive integer solutions of the underlying Pell-type equation:
    each step advances a (prev_numerator, numerator) and a
    (prev_denominator, denominator) pair, stopping once ``numerator`` exceeds
    ``2 * __magic_name__ - 1``.  Returns ``(denominator + 1) // 2`` -- the
    disc total of the first solution past the bound (4, 21, 120, 697, ...).

    Args:
        __magic_name__: minimum total to exceed (default ``10**12``).

    Returns:
        int: total for the first solution beyond the bound.

    Fixes applied: the original assigned every local to one mangled name
    (``UpperCAmelCase``) while the loop read ``prev_numerator`` /
    ``numerator`` / ``prev_denominator`` / ``denominator`` -- a guaranteed
    NameError -- and the loop bound read an undefined ``min_total`` instead
    of the actual parameter.
    """
    prev_numerator = 1
    numerator = 0
    prev_denominator = 1
    denominator = 1
    # Advance both sequences together until the numerator passes the bound.
    while numerator <= 2 * __magic_name__ - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2


# Backward-compatible alias: the __main__ block below printed an undefined
# name ``solution`` (NameError); aliasing keeps its output text unchanged.
solution = lowercase

if __name__ == "__main__":
    print(F'{solution() = }')
679
'''simple docstring'''

import math

import tensorflow as tf
from packaging import version


# NOTE(review): this file appears machine-mangled: all activation helpers are
# named ``lowercase`` (each def shadows the previous one), locals are assigned
# to ``UpperCAmelCase`` while later expressions read the original names
# (``x``, ``cdf``, ``coeff`` ... -- undefined here), one signature repeats the
# parameter name ``__magic_name__`` (a SyntaxError), the three module-level
# aliases are all ``a``, and the registry dict references names (``gelu``,
# ``gelu_new``, ``glu`` ...) never defined in this file.  Comments describe
# the apparent upstream intent (transformers' TF activation registry);
# confirm before use.


def lowercase ( __magic_name__ ):
    '''simple docstring'''
    # Apparent intent: exact GELU via the Gaussian CDF (erf form).
    UpperCAmelCase : str = tf.convert_to_tensor(__magic_name__ )
    UpperCAmelCase : int = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
    return x * cdf


def lowercase ( __magic_name__ ):
    '''simple docstring'''
    # Apparent intent: "gelu_new" -- the tanh approximation of GELU.
    UpperCAmelCase : Optional[int] = tf.convert_to_tensor(__magic_name__ )
    UpperCAmelCase : Tuple = tf.cast(math.pi , x.dtype )
    UpperCAmelCase : List[str] = tf.cast(0.0_4_4_7_1_5 , x.dtype )
    UpperCAmelCase : List[Any] = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(__magic_name__ , 3 )) ))
    return x * cdf


def lowercase ( __magic_name__ ):
    '''simple docstring'''
    # Apparent intent: mish activation, x * tanh(softplus(x)).
    UpperCAmelCase : Tuple = tf.convert_to_tensor(__magic_name__ )
    return x * tf.tanh(tf.math.softplus(__magic_name__ ) )


def lowercase ( __magic_name__ ):
    '''simple docstring'''
    # Apparent intent: "gelu_fast" tanh approximation with precomputed
    # constants (0.044715 and sqrt(2/pi) ~= 0.7978845608).
    UpperCAmelCase : int = tf.convert_to_tensor(__magic_name__ )
    UpperCAmelCase : List[str] = tf.cast(0.0_4_4_7_1_5 , x.dtype )
    UpperCAmelCase : int = tf.cast(0.7_9_7_8_8_4_5_6_0_8 , x.dtype )
    return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))


def lowercase ( __magic_name__ ):
    '''simple docstring'''
    # Apparent intent: "quick_gelu", x * sigmoid(1.702 * x).
    UpperCAmelCase : int = tf.convert_to_tensor(__magic_name__ )
    UpperCAmelCase : Optional[Any] = tf.cast(1.7_0_2 , x.dtype )
    return x * tf.math.sigmoid(coeff * x )


def lowercase ( __magic_name__ ):
    '''simple docstring'''
    # Apparent intent: "gelu_10" -- exact GELU clipped to [-10, 10].
    return tf.clip_by_value(_gelu(__magic_name__ ) , -10 , 10 )


# NOTE(review): the repeated parameter name below is a SyntaxError; upstream
# this is ``glu(x, axis=-1)``.
def lowercase ( __magic_name__ , __magic_name__=-1 ):
    '''simple docstring'''
    # Apparent intent: GLU -- split in two on the given axis, gate with sigmoid.
    UpperCAmelCase , UpperCAmelCase : Dict = tf.split(__magic_name__ , 2 , axis=__magic_name__ )
    return a * tf.math.sigmoid(__magic_name__ )


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def lowercase ( __magic_name__ ):
        '''simple docstring'''
        # Keras-native gelu wrapper on TF >= 2.4.  NOTE(review): upstream
        # passes ``approximate=True`` here; the mangled code passes the input
        # tensor itself -- confirm against the original.
        return tf.keras.activations.gelu(__magic_name__ , approximate=__magic_name__ )

    a : Tuple = tf.keras.activations.gelu
    a : Dict = approximate_gelu_wrap
else:
    a : List[str] = _gelu
    a : List[Any] = _gelu_new

# Apparent intent: the ACT2FN registry mapping config strings to activations.
a : Optional[int] = {
    "gelu": gelu,
    "gelu_10": gelu_aa,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def lowercase ( __magic_name__ ):
    '''simple docstring'''
    # Apparent intent: ``get_tf_activation`` -- registry lookup with a clear
    # error for unknown names.
    if activation_string in ACTaFN:
        return ACTaFN[activation_string]
    else:
        raise KeyError(F"function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}" )
679
1