Dataset schema (one row per code / style-context pair):

| Column                  | Type   | Range             |
|-------------------------|--------|-------------------|
| code                    | string | 82 – 54.1k chars  |
| code_codestyle          | int64  | 0 – 699           |
| style_context           | string | 111 – 35.6k chars |
| style_context_codestyle | int64  | 0 – 699           |
| label                   | int64  | 0 – 1             |
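For orientation, a minimal sketch of how a dump with this schema could be loaded and inspected. It assumes the data lives in a 🤗 `datasets`-compatible repository; the dataset path used below is a hypothetical placeholder, not the actual source of this dump.

```python
# Minimal sketch (assumption: the rows come from a 🤗 `datasets`-compatible repo).
from datasets import load_dataset

# "user/code-style-pairs" is a hypothetical placeholder path.
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
# The two integer codestyle ids plus the binary label for this row.
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
# First 200 characters of the (flattened) code sample.
print(row["code"][:200])
```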
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowercase__ : Union[str, Any] = logging.get_logger(__name__) class _UpperCAmelCase ( __SCREAMING_SNAKE_CASE): _lowerCAmelCase : Tuple = ["""pixel_values"""] def __init__( self : Optional[Any] , lowercase_ : Tuple = True , lowercase_ : Tuple = None , lowercase_ : int = PILImageResampling.BICUBIC , lowercase_ : Union[str, Any] = True , lowercase_ : Tuple = 1 / 255 , lowercase_ : str = True , lowercase_ : Dict = None , lowercase_ : Union[str, Any] = None , lowercase_ : Tuple = True , **lowercase_ : Optional[int] , ): super().__init__(**lowercase_ ) snake_case_ : List[Any] = size if size is not None else {"""height""": 384, """width""": 384} snake_case_ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_ ) snake_case_ : str = do_resize snake_case_ : Any = size snake_case_ : Optional[Any] = resample snake_case_ : List[str] = do_rescale snake_case_ : Union[str, Any] = rescale_factor snake_case_ : Union[str, Any] = do_normalize snake_case_ : Tuple = image_mean if image_mean is not None else OPENAI_CLIP_MEAN snake_case_ : Union[str, Any] = image_std if image_std is not None else OPENAI_CLIP_STD snake_case_ : Tuple = do_convert_rgb def _snake_case ( self : Dict , lowercase_ : List[Any] , lowercase_ : List[str] , lowercase_ : Any = PILImageResampling.BICUBIC , lowercase_ : Optional[Any] = None , **lowercase_ : Optional[Any] , ): snake_case_ : Any = get_size_dict(lowercase_ , default_to_square=lowercase_ ) if "height" not in size or "width" not in size: raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}" ) snake_case_ : Dict = (size["""height"""], size["""width"""]) return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ ) def _snake_case ( self : Optional[Any] , lowercase_ : Any , lowercase_ : int , lowercase_ : List[str] = None , **lowercase_ : Tuple , ): return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ ) def _snake_case ( self : Tuple , lowercase_ : Any , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : str = None , **lowercase_ : Union[str, Any] , ): return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ ) def _snake_case ( self : Tuple , lowercase_ : Dict , lowercase_ : Dict = None , lowercase_ : Tuple = None , lowercase_ : Any = None , lowercase_ : Dict = None , lowercase_ : int = None , lowercase_ : Any = None , lowercase_ : Dict = None , lowercase_ : int = None , lowercase_ : List[str] = None , lowercase_ : Dict = None , lowercase_ : Union[str, Any] = ChannelDimension.FIRST , **lowercase_ : Tuple , ): snake_case_ : int = do_resize if do_resize is not None else self.do_resize snake_case_ : Union[str, Any] = resample if resample is not None else self.resample snake_case_ : int = do_rescale if do_rescale is not None else self.do_rescale snake_case_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor snake_case_ : str = do_normalize if do_normalize is not None else self.do_normalize snake_case_ : str = image_mean if image_mean is not None else self.image_mean snake_case_ : Tuple = image_std if image_std is not None else self.image_std snake_case_ : Any = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb snake_case_ : List[str] = size if size is not None else self.size snake_case_ : Union[str, Any] = get_size_dict(lowercase_ , default_to_square=lowercase_ ) snake_case_ : str = make_list_of_images(lowercase_ ) if not valid_images(lowercase_ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: snake_case_ : Tuple = [convert_to_rgb(lowercase_ ) for image in images] # All transformations expect numpy arrays. snake_case_ : Dict = [to_numpy_array(lowercase_ ) for image in images] if do_resize: snake_case_ : int = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images] if do_rescale: snake_case_ : Dict = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images] if do_normalize: snake_case_ : Optional[int] = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images] snake_case_ : str = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images] snake_case_ : Optional[Any] = BatchFeature(data={'''pixel_values''': images} , tensor_type=lowercase_ ) return encoded_outputs
123
import unittest from transformers import BertGenerationTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin _lowerCamelCase ="""▁""" _lowerCamelCase =get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase): _UpperCAmelCase : str = BertGenerationTokenizer _UpperCAmelCase : Tuple = False _UpperCAmelCase : List[Any] = True def UpperCamelCase__ ( self ): super().setUp() lowerCamelCase : int = BertGenerationTokenizer(__magic_name__ , keep_accents=__magic_name__ ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = """<s>""" lowerCamelCase : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<unk>""" ) self.assertEqual(vocab_keys[1] , """<s>""" ) self.assertEqual(vocab_keys[-1] , """<pad>""" ) self.assertEqual(len(__magic_name__ ) , 1_0_0_2 ) def UpperCamelCase__ ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = BertGenerationTokenizer(__magic_name__ , keep_accents=__magic_name__ ) lowerCamelCase : Optional[Any] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(__magic_name__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__magic_name__ ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , ) lowerCamelCase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __magic_name__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) lowerCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__magic_name__ ) self.assertListEqual( __magic_name__ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , ) lowerCamelCase : int = tokenizer.convert_ids_to_tokens(__magic_name__ ) self.assertListEqual( __magic_name__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) @cached_property def UpperCamelCase__ ( self ): return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" ) @slow def UpperCamelCase__ ( self ): lowerCamelCase : List[Any] = """Hello World!""" lowerCamelCase : Any = [1_8_5_3_6, 2_2_6_0, 1_0_1] self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) ) @slow def UpperCamelCase__ ( self ): lowerCamelCase : str = ( """This is a very long text with a lot of weird characters, such 
as: . , ~ ? ( ) \" [ ] ! : - . Also we will""" """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth""" ) lowerCamelCase : str = [ 8_7_1, 4_1_9, 3_5_8, 9_4_6, 9_9_1, 2_5_2_1, 4_5_2, 3_5_8, 1_3_5_7, 3_8_7, 7_7_5_1, 3_5_3_6, 1_1_2, 9_8_5, 4_5_6, 1_2_6, 8_6_5, 9_3_8, 5_4_0_0, 5_7_3_4, 4_5_8, 1_3_6_8, 4_6_7, 7_8_6, 2_4_6_2, 5_2_4_6, 1_1_5_9, 6_3_3, 8_6_5, 4_5_1_9, 4_5_7, 5_8_2, 8_5_2, 2_5_5_7, 4_2_7, 9_1_6, 5_0_8, 4_0_5, 3_4_3_2_4, 4_9_7, 3_9_1, 4_0_8, 1_1_3_4_2, 1_2_4_4, 3_8_5, 1_0_0, 9_3_8, 9_8_5, 4_5_6, 5_7_4, 3_6_2, 1_2_5_9_7, 3_2_0_0, 3_1_2_9, 1_1_7_2, ] self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) ) @require_torch @slow def UpperCamelCase__ ( self ): import torch from transformers import BertGenerationConfig, BertGenerationEncoder # Build sequence lowerCamelCase : Union[str, Any] = list(self.big_tokenizer.get_vocab().keys() )[:1_0] lowerCamelCase : Dict = """ """.join(__magic_name__ ) lowerCamelCase : Any = self.big_tokenizer.encode_plus(__magic_name__ , return_tensors="""pt""" , return_token_type_ids=__magic_name__ ) lowerCamelCase : List[str] = self.big_tokenizer.batch_encode_plus( [sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=__magic_name__ ) lowerCamelCase : Tuple = BertGenerationConfig() lowerCamelCase : Optional[int] = BertGenerationEncoder(__magic_name__ ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**__magic_name__ ) model(**__magic_name__ ) @slow def UpperCamelCase__ ( self ): # fmt: off lowerCamelCase : Any = {"""input_ids""": [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__magic_name__ , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
681
0
Row 2

`code` (code_codestyle = 372):

```python
'''simple docstring'''

class _UpperCamelCase:
    '''simple docstring'''

    def __init__(self, a_) -> Any:
        lowercase : Optional[int] = set_counts
        lowercase : Optional[int] = max(a_)
        lowercase : int = len(a_)
        lowercase : Any = [1] * num_sets
        lowercase : List[str] = list(range(a_))

    def a__(self, a_, a_) -> Tuple:
        lowercase : Optional[Any] = self.get_parent(a_)
        lowercase : Tuple = self.get_parent(a_)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            lowercase : Any = 0
            lowercase : int = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            lowercase : Union[str, Any] = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            lowercase : int = 0
            lowercase : int = src_parent
            lowercase : Optional[int] = self.set_counts[src_parent]
        lowercase : Tuple = max(self.max_set, a_)
        return True

    def a__(self, a_) -> Dict:
        if self.parents[disj_set] == disj_set:
            return disj_set
        lowercase : Optional[int] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
```

`style_context` (style_context_codestyle = 681):

```python
from arguments import InitializationArguments

from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser

# Configuration
_lowerCamelCase = HfArgumentParser(InitializationArguments)
_lowerCamelCase = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
_lowerCamelCase = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
_lowerCamelCase = {
    """vocab_size""": len(tokenizer),
    """scale_attn_by_inverse_layer_idx""": True,
    """reorder_and_upcast_attn""": True,
}

# Load model config (GPT-2 large in this case)
_lowerCamelCase = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
_lowerCamelCase = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
```

`label`: 0
Row 3

`code` (code_codestyle = 25):

```python
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

a_ = {
    'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ = ['ConvNextFeatureExtractor']
    a_ = ['ConvNextImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ = [
        'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ConvNextForImageClassification',
        'ConvNextModel',
        'ConvNextPreTrainedModel',
        'ConvNextBackbone',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ = [
        'TFConvNextForImageClassification',
        'TFConvNextModel',
        'TFConvNextPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel

else:
    import sys

    a_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
```

`style_context` (style_context_codestyle = 681):

```python
import os
import tempfile
import unittest
from pathlib import Path

from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf

if is_tf_available():
    import tensorflow as tf

    from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

@require_tf
class A__(unittest.TestCase):
    def UpperCamelCase__(self, __magic_name__):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["""bs"""], model_result["""ss"""]):
                lowerCamelCase : List[str] = model_result["""result"""][batch_size][sequence_length]
                self.assertIsNotNone(__magic_name__)

    def UpperCamelCase__(self):
        lowerCamelCase : List[str] = """sshleifer/tiny-gpt2"""
        lowerCamelCase : str = TensorFlowBenchmarkArguments(models=[MODEL_ID], training=__magic_name__, inference=__magic_name__, sequence_lengths=[8], batch_sizes=[1], eager_mode=__magic_name__, multi_process=__magic_name__)
        lowerCamelCase : Dict = TensorFlowBenchmark(__magic_name__)
        lowerCamelCase : Tuple = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def UpperCamelCase__(self):
        lowerCamelCase : Any = """sgugger/tiny-distilbert-classification"""
        lowerCamelCase : Optional[int] = TensorFlowBenchmarkArguments(models=[MODEL_ID], training=__magic_name__, inference=__magic_name__, sequence_lengths=[8], batch_sizes=[1], multi_process=__magic_name__, only_pretrain_model=__magic_name__)
        lowerCamelCase : List[Any] = TensorFlowBenchmark(__magic_name__)
        lowerCamelCase : Any = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def UpperCamelCase__(self):
        lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2"""
        lowerCamelCase : Optional[Any] = TensorFlowBenchmarkArguments(models=[MODEL_ID], training=__magic_name__, inference=__magic_name__, sequence_lengths=[8], batch_sizes=[1], multi_process=__magic_name__)
        lowerCamelCase : Any = TensorFlowBenchmark(__magic_name__)
        lowerCamelCase : Any = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def UpperCamelCase__(self):
        lowerCamelCase : List[Any] = """sshleifer/tiny-gpt2"""
        lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__)
        lowerCamelCase : int = TensorFlowBenchmarkArguments(models=[MODEL_ID], training=__magic_name__, inference=__magic_name__, sequence_lengths=[8], batch_sizes=[1], eager_mode=__magic_name__, multi_process=__magic_name__)
        lowerCamelCase : Optional[Any] = TensorFlowBenchmark(__magic_name__, [config])
        lowerCamelCase : Any = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def UpperCamelCase__(self):
        lowerCamelCase : Tuple = """sshleifer/tiny-gpt2"""
        lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(__magic_name__)
        lowerCamelCase : int = TensorFlowBenchmarkArguments(models=[MODEL_ID], training=__magic_name__, inference=__magic_name__, sequence_lengths=[8], batch_sizes=[1], multi_process=__magic_name__)
        lowerCamelCase : Union[str, Any] = TensorFlowBenchmark(__magic_name__, [config])
        lowerCamelCase : Union[str, Any] = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def UpperCamelCase__(self):
        lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2"""
        lowerCamelCase : Any = TensorFlowBenchmarkArguments(models=[MODEL_ID], training=__magic_name__, inference=__magic_name__, sequence_lengths=[8], batch_sizes=[1], multi_process=__magic_name__)
        lowerCamelCase : int = TensorFlowBenchmark(__magic_name__)
        lowerCamelCase : Tuple = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def UpperCamelCase__(self):
        lowerCamelCase : int = """sshleifer/tiny-gpt2"""
        lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__)
        lowerCamelCase : int = TensorFlowBenchmarkArguments(models=[MODEL_ID], training=__magic_name__, inference=__magic_name__, sequence_lengths=[8], batch_sizes=[1], multi_process=__magic_name__)
        lowerCamelCase : Any = TensorFlowBenchmark(__magic_name__, [config])
        lowerCamelCase : str = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def UpperCamelCase__(self):
        lowerCamelCase : str = """patrickvonplaten/t5-tiny-random"""
        lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__)
        lowerCamelCase : Tuple = TensorFlowBenchmarkArguments(models=[MODEL_ID], training=__magic_name__, inference=__magic_name__, sequence_lengths=[8], batch_sizes=[1], multi_process=__magic_name__)
        lowerCamelCase : List[Any] = TensorFlowBenchmark(__magic_name__, configs=[config])
        lowerCamelCase : List[str] = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""")) == 0, """Cannot do xla on CPU.""")
    def UpperCamelCase__(self):
        lowerCamelCase : Optional[Any] = """sshleifer/tiny-gpt2"""
        lowerCamelCase : Dict = TensorFlowBenchmarkArguments(models=[MODEL_ID], training=__magic_name__, inference=__magic_name__, sequence_lengths=[8], batch_sizes=[1], use_xla=__magic_name__, multi_process=__magic_name__)
        lowerCamelCase : int = TensorFlowBenchmark(__magic_name__)
        lowerCamelCase : str = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def UpperCamelCase__(self):
        lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2"""
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowerCamelCase : List[str] = TensorFlowBenchmarkArguments(models=[MODEL_ID], inference=__magic_name__, save_to_csv=__magic_name__, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(__magic_name__, """inf_time.csv"""), inference_memory_csv_file=os.path.join(__magic_name__, """inf_mem.csv"""), env_info_csv_file=os.path.join(__magic_name__, """env.csv"""), multi_process=__magic_name__)
            lowerCamelCase : List[str] = TensorFlowBenchmark(__magic_name__)
            benchmark.run()
            self.assertTrue(Path(os.path.join(__magic_name__, """inf_time.csv""")).exists())
            self.assertTrue(Path(os.path.join(__magic_name__, """inf_mem.csv""")).exists())
            self.assertTrue(Path(os.path.join(__magic_name__, """env.csv""")).exists())

    def UpperCamelCase__(self):
        lowerCamelCase : str = """sshleifer/tiny-gpt2"""

        def _check_summary_is_not_empty(__magic_name__):
            self.assertTrue(hasattr(__magic_name__, """sequential"""))
            self.assertTrue(hasattr(__magic_name__, """cumulative"""))
            self.assertTrue(hasattr(__magic_name__, """current"""))
            self.assertTrue(hasattr(__magic_name__, """total"""))

        with tempfile.TemporaryDirectory() as tmp_dir:
            lowerCamelCase : int = TensorFlowBenchmarkArguments(models=[MODEL_ID], inference=__magic_name__, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(__magic_name__, """log.txt"""), log_print=__magic_name__, trace_memory_line_by_line=__magic_name__, eager_mode=__magic_name__, multi_process=__magic_name__)
            lowerCamelCase : Tuple = TensorFlowBenchmark(__magic_name__)
            lowerCamelCase : Union[str, Any] = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(__magic_name__, """log.txt""")).exists())
```

`label`: 0
Row 4

`code` (code_codestyle = 138):

```python
'''simple docstring'''

import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    PNDMScheduler,
    StableDiffusionLDMaDPipeline,
    UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS

enable_full_determinism()

class A_(unittest.TestCase):
    '''simple docstring'''

    _lowerCAmelCase = StableDiffusionLDMaDPipeline
    _lowerCAmelCase = TEXT_TO_IMAGE_PARAMS
    _lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
    _lowerCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS

    def a(self):
        torch.manual_seed(0)
        _UpperCamelCase = UNetaDConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        _UpperCamelCase = DDIMScheduler(beta_start=0.0_0085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=A_, set_alpha_to_one=A_)
        torch.manual_seed(0)
        _UpperCamelCase = AutoencoderKL(block_out_channels=[32, 64], in_channels=6, out_channels=6, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        _UpperCamelCase = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-0_5, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=10_00)
        _UpperCamelCase = CLIPTextModel(A_)
        _UpperCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        _UpperCamelCase = {"""unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None}
        return components

    def a(self, A_, A_=0):
        if str(A_).startswith("mps"):
            _UpperCamelCase = torch.manual_seed(A_)
        else:
            _UpperCamelCase = torch.Generator(device=A_).manual_seed(A_)
        _UpperCamelCase = {"""prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy"""}
        return inputs

    def a(self):
        _UpperCamelCase = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        _UpperCamelCase = self.get_dummy_components()
        _UpperCamelCase = StableDiffusionLDMaDPipeline(**A_)
        _UpperCamelCase = ldmad_pipe.to(A_)
        ldmad_pipe.set_progress_bar_config(disable=A_)
        _UpperCamelCase = self.get_dummy_inputs(A_)
        _UpperCamelCase = ldmad_pipe(**A_)
        _UpperCamelCase = output.rgb, output.depth
        _UpperCamelCase = rgb[0, -3:, -3:, -1]
        _UpperCamelCase = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        _UpperCamelCase = np.array([0.3733_8176, 0.7_0247, 0.7420_3193, 0.5164_3604, 0.5825_6793, 0.6093_2136, 0.418_1095, 0.4835_5877, 0.4653_5262])
        _UpperCamelCase = np.array([103.4_6727, 85.81_2004, 87.84_9236])
        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2

    def a(self):
        _UpperCamelCase = self.get_dummy_components()
        _UpperCamelCase = StableDiffusionLDMaDPipeline(**A_)
        _UpperCamelCase = ldmad_pipe.to(A_)
        ldmad_pipe.set_progress_bar_config(disable=A_)
        _UpperCamelCase = self.get_dummy_inputs(A_)
        _UpperCamelCase = 3 * [inputs["""prompt"""]]
        # forward
        _UpperCamelCase = ldmad_pipe(**A_)
        _UpperCamelCase = output.rgb, output.depth
        _UpperCamelCase = rgb_slice_a[0, -3:, -3:, -1]
        _UpperCamelCase = depth_slice_a[0, -3:, -1]
        _UpperCamelCase = self.get_dummy_inputs(A_)
        _UpperCamelCase = 3 * [inputs.pop("prompt")]
        _UpperCamelCase = ldmad_pipe.tokenizer(A_, padding="max_length", max_length=ldmad_pipe.tokenizer.model_max_length, truncation=A_, return_tensors="pt")
        _UpperCamelCase = text_inputs["""input_ids"""].to(A_)
        _UpperCamelCase = ldmad_pipe.text_encoder(A_)[0]
        _UpperCamelCase = prompt_embeds
        # forward
        _UpperCamelCase = ldmad_pipe(**A_)
        _UpperCamelCase = output.rgb, output.depth
        _UpperCamelCase = rgb_slice_a[0, -3:, -3:, -1]
        _UpperCamelCase = depth_slice_a[0, -3:, -1]
        assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten()).max() < 1e-4
        assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten()).max() < 1e-4

    def a(self):
        _UpperCamelCase = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        _UpperCamelCase = self.get_dummy_components()
        _UpperCamelCase = PNDMScheduler(skip_prk_steps=A_)
        _UpperCamelCase = StableDiffusionLDMaDPipeline(**A_)
        _UpperCamelCase = ldmad_pipe.to(A_)
        ldmad_pipe.set_progress_bar_config(disable=A_)
        _UpperCamelCase = self.get_dummy_inputs(A_)
        _UpperCamelCase = """french fries"""
        _UpperCamelCase = ldmad_pipe(**A_, negative_prompt=A_)
        _UpperCamelCase = output.rgb, output.depth
        _UpperCamelCase = rgb[0, -3:, -3:, -1]
        _UpperCamelCase = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        _UpperCamelCase = np.array([0.3_7044, 0.7181_1503, 0.722_3251, 0.4860_3675, 0.563_8391, 0.636_4948, 0.4283_3704, 0.490_1315, 0.4792_6217])
        _UpperCamelCase = np.array([107.8_4738, 84.6_2802, 89.96_2135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2

@slow
@require_torch_gpu
class A_(unittest.TestCase):
    '''simple docstring'''

    def a(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def a(self, A_, A_="cpu", A_=torch.floataa, A_=0):
        _UpperCamelCase = torch.Generator(device=A_).manual_seed(A_)
        _UpperCamelCase = np.random.RandomState(A_).standard_normal((1, 4, 64, 64))
        _UpperCamelCase = torch.from_numpy(A_).to(device=A_, dtype=A_)
        _UpperCamelCase = {"""prompt""": """a photograph of an astronaut riding a horse""", """latents""": latents, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """output_type""": """numpy"""}
        return inputs

    def a(self):
        _UpperCamelCase = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d")
        _UpperCamelCase = ldmad_pipe.to(A_)
        ldmad_pipe.set_progress_bar_config(disable=A_)
        _UpperCamelCase = self.get_inputs(A_)
        _UpperCamelCase = ldmad_pipe(**A_)
        _UpperCamelCase = output.rgb, output.depth
        _UpperCamelCase = rgb[0, -3:, -3:, -1].flatten()
        _UpperCamelCase = rgb[0, -3:, -1].flatten()
        assert rgb.shape == (1, 5_12, 5_12, 3)
        assert depth.shape == (1, 5_12, 5_12)
        _UpperCamelCase = np.array([0.5380_5465, 0.5670_7305, 0.548_6515, 0.5701_2236, 0.581_4511, 0.5625_3487, 0.5484_3014, 0.5509_2263, 0.645_9706])
        _UpperCamelCase = np.array([0.926_3781, 0.667_8672, 0.548_6515, 0.9220_2145, 0.6783_1135, 0.5625_3487, 0.924_1694, 0.755_1478, 0.645_9706])
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3

@nightly
@require_torch_gpu
class A_(unittest.TestCase):
    '''simple docstring'''

    def a(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def a(self, A_, A_="cpu", A_=torch.floataa, A_=0):
        _UpperCamelCase = torch.Generator(device=A_).manual_seed(A_)
        _UpperCamelCase = np.random.RandomState(A_).standard_normal((1, 4, 64, 64))
        _UpperCamelCase = torch.from_numpy(A_).to(device=A_, dtype=A_)
        _UpperCamelCase = {"""prompt""": """a photograph of an astronaut riding a horse""", """latents""": latents, """generator""": generator, """num_inference_steps""": 50, """guidance_scale""": 7.5, """output_type""": """numpy"""}
        return inputs

    def a(self):
        _UpperCamelCase = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d").to(A_)
        ldmad_pipe.set_progress_bar_config(disable=A_)
        _UpperCamelCase = self.get_inputs(A_)
        _UpperCamelCase = ldmad_pipe(**A_)
        _UpperCamelCase = output.rgb, output.depth
        _UpperCamelCase = 0.49_5586
        _UpperCamelCase = 0.3379_5515
        _UpperCamelCase = 112.4_8518
        _UpperCamelCase = 98.48_9746
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def a(self):
        _UpperCamelCase = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c").to(A_)
        ldmad_pipe.set_progress_bar_config(disable=A_)
        _UpperCamelCase = self.get_inputs(A_)
        _UpperCamelCase = ldmad_pipe(**A_)
        _UpperCamelCase = output.rgb, output.depth
        _UpperCamelCase = 0.419_4127
        _UpperCamelCase = 0.3537_5586
        _UpperCamelCase = 0.563_8502
        _UpperCamelCase = 0.3468_6103
        assert rgb.shape == (1, 5_12, 5_12, 3)
        assert depth.shape == (1, 5_12, 5_12, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
```

`style_context` (style_context_codestyle = 681):

```python
import unittest

from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate

def _a(lowerCamelCase):
    return x + 2

class A__(unittest.TestCase):
    def UpperCamelCase__(self):
        lowerCamelCase : List[Any] = """x = 3"""
        lowerCamelCase : Tuple = {}
        lowerCamelCase : List[str] = evaluate(__magic_name__, {}, state=__magic_name__)
        assert result == 3
        self.assertDictEqual(__magic_name__, {"""x""": 3})
        lowerCamelCase : Optional[int] = """x = y"""
        lowerCamelCase : Tuple = {"""y""": 5}
        lowerCamelCase : Tuple = evaluate(__magic_name__, {}, state=__magic_name__)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(__magic_name__, {"""x""": 5, """y""": 5})

    def UpperCamelCase__(self):
        lowerCamelCase : List[str] = """y = add_two(x)"""
        lowerCamelCase : List[Any] = {"""x""": 3}
        lowerCamelCase : Union[str, Any] = evaluate(__magic_name__, {"""add_two""": add_two}, state=__magic_name__)
        assert result == 5
        self.assertDictEqual(__magic_name__, {"""x""": 3, """y""": 5})
        # Won't work without the tool
        with CaptureStdout() as out:
            lowerCamelCase : Union[str, Any] = evaluate(__magic_name__, {}, state=__magic_name__)
        assert result is None
        assert "tried to execute add_two" in out.out

    def UpperCamelCase__(self):
        lowerCamelCase : int = """x = 3"""
        lowerCamelCase : Dict = {}
        lowerCamelCase : Tuple = evaluate(__magic_name__, {}, state=__magic_name__)
        assert result == 3
        self.assertDictEqual(__magic_name__, {"""x""": 3})

    def UpperCamelCase__(self):
        lowerCamelCase : Optional[Any] = """test_dict = {'x': x, 'y': add_two(x)}"""
        lowerCamelCase : Optional[int] = {"""x""": 3}
        lowerCamelCase : Tuple = evaluate(__magic_name__, {"""add_two""": add_two}, state=__magic_name__)
        self.assertDictEqual(__magic_name__, {"""x""": 3, """y""": 5})
        self.assertDictEqual(__magic_name__, {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}})

    def UpperCamelCase__(self):
        lowerCamelCase : Tuple = """x = 3\ny = 5"""
        lowerCamelCase : Optional[int] = {}
        lowerCamelCase : Union[str, Any] = evaluate(__magic_name__, {}, state=__magic_name__)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(__magic_name__, {"""x""": 3, """y""": 5})

    def UpperCamelCase__(self):
        lowerCamelCase : Tuple = """text = f'This is x: {x}.'"""
        lowerCamelCase : Optional[int] = {"""x""": 3}
        lowerCamelCase : Optional[int] = evaluate(__magic_name__, {}, state=__magic_name__)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(__magic_name__, {"""x""": 3, """text""": """This is x: 3."""})

    def UpperCamelCase__(self):
        lowerCamelCase : Tuple = """if x <= 3:\n y = 2\nelse:\n y = 5"""
        lowerCamelCase : Tuple = {"""x""": 3}
        lowerCamelCase : int = evaluate(__magic_name__, {}, state=__magic_name__)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(__magic_name__, {"""x""": 3, """y""": 2})
        lowerCamelCase : Tuple = {"""x""": 8}
        lowerCamelCase : Dict = evaluate(__magic_name__, {}, state=__magic_name__)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(__magic_name__, {"""x""": 8, """y""": 5})

    def UpperCamelCase__(self):
        lowerCamelCase : Dict = """test_list = [x, add_two(x)]"""
        lowerCamelCase : List[Any] = {"""x""": 3}
        lowerCamelCase : List[str] = evaluate(__magic_name__, {"""add_two""": add_two}, state=__magic_name__)
        self.assertListEqual(__magic_name__, [3, 5])
        self.assertDictEqual(__magic_name__, {"""x""": 3, """test_list""": [3, 5]})

    def UpperCamelCase__(self):
        lowerCamelCase : str = """y = x"""
        lowerCamelCase : List[Any] = {"""x""": 3}
        lowerCamelCase : Any = evaluate(__magic_name__, {}, state=__magic_name__)
        assert result == 3
        self.assertDictEqual(__magic_name__, {"""x""": 3, """y""": 3})

    def UpperCamelCase__(self):
        lowerCamelCase : Optional[int] = """test_list = [x, add_two(x)]\ntest_list[1]"""
        lowerCamelCase : Any = {"""x""": 3}
        lowerCamelCase : List[str] = evaluate(__magic_name__, {"""add_two""": add_two}, state=__magic_name__)
        assert result == 5
        self.assertDictEqual(__magic_name__, {"""x""": 3, """test_list""": [3, 5]})
        lowerCamelCase : Any = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"""
        lowerCamelCase : Dict = {"""x""": 3}
        lowerCamelCase : Any = evaluate(__magic_name__, {"""add_two""": add_two}, state=__magic_name__)
        assert result == 5
        self.assertDictEqual(__magic_name__, {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}})

    def UpperCamelCase__(self):
        lowerCamelCase : Union[str, Any] = """x = 0\nfor i in range(3):\n x = i"""
        lowerCamelCase : int = {}
        lowerCamelCase : Union[str, Any] = evaluate(__magic_name__, {"""range""": range}, state=__magic_name__)
        assert result == 2
        self.assertDictEqual(__magic_name__, {"""x""": 2, """i""": 2})
```

`label`: 0
"""simple docstring""" import logging import os from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional from tqdm import auto as tqdm_lib A_ : List[str] = { 'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING, 'error': logging.ERROR, 'critical': logging.CRITICAL, } A_ : int = logging.WARNING def __snake_case ( ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = os.getenv('DATASETS_VERBOSITY' , __A ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( F"""Unknown option DATASETS_VERBOSITY={env_level_str}, """ F"""has to be one of: { ", ".join(log_levels.keys() ) }""" ) return _default_log_level def __snake_case ( ) -> Dict: '''simple docstring''' return __name__.split('.' )[0] def __snake_case ( ) -> List[Any]: '''simple docstring''' return logging.getLogger(_get_library_name() ) def __snake_case ( ) -> Optional[Any]: '''simple docstring''' # Apply our default configuration to the library root logger. SCREAMING_SNAKE_CASE : str = _get_library_root_logger() library_root_logger.setLevel(_get_default_logging_level() ) def __snake_case ( ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = _get_library_root_logger() library_root_logger.setLevel(logging.NOTSET ) def __snake_case ( __A : str = None ) -> Optional[Any]: '''simple docstring''' if name is None: SCREAMING_SNAKE_CASE : Optional[int] = _get_library_name() return logging.getLogger(__A ) def __snake_case ( ) -> str: '''simple docstring''' return _get_library_root_logger().getEffectiveLevel() def __snake_case ( __A : List[str] ) -> Dict: '''simple docstring''' _get_library_root_logger().setLevel(__A ) def __snake_case ( ) -> Any: '''simple docstring''' return set_verbosity(__A ) def __snake_case ( ) -> Optional[Any]: '''simple docstring''' return set_verbosity(__A ) def __snake_case ( ) -> str: '''simple docstring''' return set_verbosity(__A ) def __snake_case ( ) -> List[str]: '''simple docstring''' return set_verbosity(__A ) def __snake_case ( ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE : int = False def __snake_case ( ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = True # Configure the library root logger at the module level (singleton-like) _configure_library_root_logger() class lowerCAmelCase__ : '''simple docstring''' def __init__( self : List[Any] , *_SCREAMING_SNAKE_CASE : List[Any] , **_SCREAMING_SNAKE_CASE : Optional[Any] ) -> int: # pylint: disable=unused-argument """simple docstring""" SCREAMING_SNAKE_CASE : int = args[0] if args else None def __iter__( self : List[Any] ) -> Tuple: """simple docstring""" return iter(self._iterator ) def __getattr__( self : int , _SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int: """simple docstring""" def empty_fn(*_SCREAMING_SNAKE_CASE : Optional[Any] , **_SCREAMING_SNAKE_CASE : Union[str, Any] ): # pylint: disable=unused-argument return return empty_fn def __enter__( self : str ) -> List[str]: """simple docstring""" return self def __exit__( self : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[int] ) -> List[Any]: """simple docstring""" return A_ : str = True class lowerCAmelCase__ : '''simple docstring''' def __call__( self : Any , *_SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str]=False , **_SCREAMING_SNAKE_CASE : Any ) -> Union[str, 
Any]: """simple docstring""" if _tqdm_active and not disable: return tqdm_lib.tqdm(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) else: return EmptyTqdm(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def _lowerCAmelCase ( self : List[Any] , *_SCREAMING_SNAKE_CASE : Optional[Any] , **_SCREAMING_SNAKE_CASE : List[Any] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def _lowerCAmelCase ( self : str ) -> Any: """simple docstring""" if _tqdm_active: return tqdm_lib.tqdm.get_lock() A_ : Dict = _tqdm_cls() def __snake_case ( ) -> int: '''simple docstring''' global _tqdm_active return bool(_tqdm_active ) def __snake_case ( ) -> Any: '''simple docstring''' global _tqdm_active SCREAMING_SNAKE_CASE : List[str] = True def __snake_case ( ) -> int: '''simple docstring''' global _tqdm_active SCREAMING_SNAKE_CASE : List[Any] = False
265
from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase =logging.get_logger(__name__) _lowerCamelCase ={ """edbeeching/decision-transformer-gym-hopper-medium""": ( """https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json""" ), # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer } class A__ ( __SCREAMING_SNAKE_CASE): _UpperCAmelCase : Optional[int] = """decision_transformer""" _UpperCAmelCase : str = ["""past_key_values"""] _UpperCAmelCase : Any = { """max_position_embeddings""": """n_positions""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , __magic_name__=1_7 , __magic_name__=4 , __magic_name__=1_2_8 , __magic_name__=4_0_9_6 , __magic_name__=True , __magic_name__=1 , __magic_name__=1_0_2_4 , __magic_name__=3 , __magic_name__=1 , __magic_name__=None , __magic_name__="relu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=1e-5 , __magic_name__=0.02 , __magic_name__=True , __magic_name__=True , __magic_name__=5_0_2_5_6 , __magic_name__=5_0_2_5_6 , __magic_name__=False , __magic_name__=False , **__magic_name__ , ): lowerCamelCase : Optional[int] = state_dim lowerCamelCase : int = act_dim lowerCamelCase : int = hidden_size lowerCamelCase : Union[str, Any] = max_ep_len lowerCamelCase : Optional[int] = action_tanh lowerCamelCase : Any = vocab_size lowerCamelCase : List[str] = n_positions lowerCamelCase : List[Any] = n_layer lowerCamelCase : Dict = n_head lowerCamelCase : Optional[Any] = n_inner lowerCamelCase : Tuple = activation_function lowerCamelCase : Tuple = resid_pdrop lowerCamelCase : str = embd_pdrop lowerCamelCase : Dict = attn_pdrop lowerCamelCase : Tuple = layer_norm_epsilon lowerCamelCase : Tuple = initializer_range lowerCamelCase : Tuple = scale_attn_weights lowerCamelCase : str = use_cache lowerCamelCase : List[Any] = scale_attn_by_inverse_layer_idx lowerCamelCase : List[str] = reorder_and_upcast_attn lowerCamelCase : Optional[Any] = bos_token_id lowerCamelCase : str = eos_token_id super().__init__(bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
681
0
"""simple docstring""" import inspect import unittest from transformers import RegNetConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import RegNetForImageClassification, RegNetModel from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __UpperCAmelCase : '''simple docstring''' def __init__( self , snake_case_ , snake_case_=3 , snake_case_=32 , snake_case_=3 , snake_case_=10 , snake_case_=[10, 20, 30, 40] , snake_case_=[1, 1, 2, 1] , snake_case_=True , snake_case_=True , snake_case_="relu" , snake_case_=3 , snake_case_=None , ): '''simple docstring''' A__ : int = parent A__ : List[str] = batch_size A__ : Optional[Any] = image_size A__ : Optional[Any] = num_channels A__ : Optional[Any] = embeddings_size A__ : Union[str, Any] = hidden_sizes A__ : Any = depths A__ : Dict = is_training A__ : List[Any] = use_labels A__ : int = hidden_act A__ : Tuple = num_labels A__ : int = scope A__ : Optional[int] = len(snake_case_ ) def lowerCamelCase ( self ): '''simple docstring''' A__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A__ : int = None if self.use_labels: A__ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels ) A__ : str = self.get_config() return config, pixel_values, labels def lowerCamelCase ( self ): '''simple docstring''' return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def lowerCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ): '''simple docstring''' A__ : int = RegNetModel(config=snake_case_ ) model.to(snake_case_ ) model.eval() A__ : str = model(snake_case_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ): '''simple docstring''' A__ : str = self.num_labels A__ : Dict = RegNetForImageClassification(snake_case_ ) model.to(snake_case_ ) model.eval() A__ : str = model(snake_case_ , labels=snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase ( self ): '''simple docstring''' A__ : Dict = self.prepare_config_and_inputs() A__ : Any = config_and_inputs A__ : Optional[int] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __UpperCAmelCase (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' _UpperCamelCase : str = (RegNetModel, RegNetForImageClassification) if is_torch_available() else () _UpperCamelCase : Tuple = ( {"""feature-extraction""": RegNetModel, """image-classification""": RegNetForImageClassification} if is_torch_available() else {} ) _UpperCamelCase : List[Any] = False _UpperCamelCase : int = False _UpperCamelCase : str = False _UpperCamelCase : List[Any] = False def 
lowerCamelCase ( self ): '''simple docstring''' A__ : List[str] = RegNetModelTester(self ) A__ : str = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ ) def lowerCamelCase ( self ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase ( self ): '''simple docstring''' return @unittest.skip(reason="""RegNet does not use inputs_embeds""" ) def lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip(reason="""RegNet does not support input and output embeddings""" ) def lowerCamelCase ( self ): '''simple docstring''' pass def lowerCamelCase ( self ): '''simple docstring''' A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ : List[str] = model_class(snake_case_ ) A__ : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ : List[str] = [*signature.parameters.keys()] A__ : Union[str, Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , snake_case_ ) def lowerCamelCase ( self ): '''simple docstring''' A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case_ ) def lowerCamelCase ( self ): '''simple docstring''' A__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ : Optional[int] = model_class(config=snake_case_ ) for name, module in model.named_modules(): if isinstance(snake_case_ , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) def lowerCamelCase ( self ): '''simple docstring''' def check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ): A__ : Tuple = model_class(snake_case_ ) model.to(snake_case_ ) model.eval() with torch.no_grad(): A__ : Optional[int] = model(**self._prepare_for_class(snake_case_ , snake_case_ ) ) A__ : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A__ : Optional[int] = self.model_tester.num_stages self.assertEqual(len(snake_case_ ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) A__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() A__ : Any = ["""basic""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: A__ : str = layer_type A__ : Tuple = True check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A__ : int = True check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ) def lowerCamelCase ( self ): '''simple docstring''' A__ : int = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case_ ) @slow def lowerCamelCase ( self ): '''simple docstring''' for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ : str = RegNetModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) def _A( ): A__ : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class __UpperCAmelCase (unittest.TestCase ): '''simple docstring''' @cached_property def lowerCamelCase ( self ): '''simple docstring''' return ( AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCamelCase ( self ): '''simple docstring''' A__ : Tuple = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(snake_case_ ) A__ : int = self.default_image_processor A__ : str = prepare_img() A__ : Optional[Any] = image_processor(images=snake_case_ , return_tensors="""pt""" ).to(snake_case_ ) # forward pass with torch.no_grad(): A__ : Any = model(**snake_case_ ) # verify the logits A__ : Union[str, Any] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , snake_case_ ) A__ : List[Any] = torch.tensor([-0.41_80, -1.50_51, -3.48_36] ).to(snake_case_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1E-4 ) )
363
import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig _lowerCamelCase =logging.get_logger(__name__) class A__ : def __init__( self , __magic_name__ , __magic_name__ ): lowerCamelCase : Any = question_encoder lowerCamelCase : Dict = generator lowerCamelCase : Tuple = self.question_encoder def UpperCamelCase__ ( self , __magic_name__ ): if os.path.isfile(__magic_name__ ): raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' ) os.makedirs(__magic_name__ , exist_ok=__magic_name__ ) lowerCamelCase : Any = os.path.join(__magic_name__ , """question_encoder_tokenizer""" ) lowerCamelCase : str = os.path.join(__magic_name__ , """generator_tokenizer""" ) self.question_encoder.save_pretrained(__magic_name__ ) self.generator.save_pretrained(__magic_name__ ) @classmethod def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ): # dynamically import AutoTokenizer from ..auto.tokenization_auto import AutoTokenizer lowerCamelCase : Any = kwargs.pop("""config""" , __magic_name__ ) if config is None: lowerCamelCase : Tuple = RagConfig.from_pretrained(__magic_name__ ) lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained( __magic_name__ , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" ) lowerCamelCase : Any = AutoTokenizer.from_pretrained( __magic_name__ , config=config.generator , subfolder="""generator_tokenizer""" ) return cls(question_encoder=__magic_name__ , generator=__magic_name__ ) def __call__( self , *__magic_name__ , **__magic_name__ ): return self.current_tokenizer(*__magic_name__ , **__magic_name__ ) def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ): return self.generator.batch_decode(*__magic_name__ , **__magic_name__ ) def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ): return self.generator.decode(*__magic_name__ , **__magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : Union[str, Any] = self.question_encoder def UpperCamelCase__ ( self ): lowerCamelCase : str = self.generator def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "longest" , __magic_name__ = None , __magic_name__ = True , **__magic_name__ , ): warnings.warn( """`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """ """regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """ """context manager to prepare your targets. See the documentation of your specific tokenizer for more """ """details""" , __magic_name__ , ) if max_length is None: lowerCamelCase : int = self.current_tokenizer.model_max_length lowerCamelCase : int = self( __magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , max_length=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: lowerCamelCase : int = self.current_tokenizer.model_max_length lowerCamelCase : Dict = self( text_target=__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , padding=__magic_name__ , max_length=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , ) lowerCamelCase : List[Any] = labels["""input_ids"""] return model_inputs
import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[Any] ): """simple docstring""" a_ : Tuple = BertConfig.from_json_file(SCREAMING_SNAKE_CASE_ ) print(F"""Building PyTorch model from configuration: {config}""" ) a_ : Optional[int] = BertForPreTraining(SCREAMING_SNAKE_CASE_ ) # Load weights from tf checkpoint load_tf_weights_in_bert(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Save pytorch-model print(F"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--bert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained BERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) SCREAMING_SNAKE_CASE : Tuple = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
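# --- Added usage note (not part of the original script) ---
# A sketch of invoking the converter above; every path below is a placeholder.
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin
# The dumped state dict can then be loaded back into a fresh model like so:
import torch
from transformers import BertConfig, BertForPreTraining

config = BertConfig.from_json_file("./bert_config.json")  # placeholder path
model = BertForPreTraining(config)
model.load_state_dict(torch.load("./pytorch_model.bin"))  # placeholder path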
import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def _a ( lowerCamelCase, lowerCamelCase ): lowerCamelCase : List[Any] = F'''{sampling_rate}''' lowerCamelCase : Optional[int] = """1""" lowerCamelCase : Any = """f32le""" lowerCamelCase : Any = [ """ffmpeg""", """-i""", """pipe:0""", """-ac""", ac, """-ar""", ar, """-f""", format_for_conversion, """-hide_banner""", """-loglevel""", """quiet""", """pipe:1""", ] try: with subprocess.Popen(lowerCamelCase, stdin=subprocess.PIPE, stdout=subprocess.PIPE ) as ffmpeg_process: lowerCamelCase : Optional[int] = ffmpeg_process.communicate(lowerCamelCase ) except FileNotFoundError as error: raise ValueError("""ffmpeg was not found but is required to load audio files from filename""" ) from error lowerCamelCase : Union[str, Any] = output_stream[0] lowerCamelCase : Optional[Any] = np.frombuffer(lowerCamelCase, np.floataa ) if audio.shape[0] == 0: raise ValueError("""Malformed soundfile""" ) return audio def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase = "f32le", ): lowerCamelCase : Dict = F'''{sampling_rate}''' lowerCamelCase : List[Any] = """1""" if format_for_conversion == "s16le": lowerCamelCase : Any = 2 elif format_for_conversion == "f32le": lowerCamelCase : Dict = 4 else: raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' ) lowerCamelCase : Dict = platform.system() if system == "Linux": lowerCamelCase : Union[str, Any] = """alsa""" lowerCamelCase : List[Any] = """default""" elif system == "Darwin": lowerCamelCase : List[Any] = """avfoundation""" lowerCamelCase : List[Any] = """:0""" elif system == "Windows": lowerCamelCase : int = """dshow""" lowerCamelCase : Any = """default""" lowerCamelCase : Any = [ """ffmpeg""", """-f""", format_, """-i""", input_, """-ac""", ac, """-ar""", ar, """-f""", format_for_conversion, """-fflags""", """nobuffer""", """-hide_banner""", """-loglevel""", """quiet""", """pipe:1""", ] lowerCamelCase : List[Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample lowerCamelCase : Any = _ffmpeg_stream(lowerCamelCase, lowerCamelCase ) for item in iterator: yield item def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = "f32le", ): if stream_chunk_s is not None: lowerCamelCase : int = stream_chunk_s else: lowerCamelCase : Dict = chunk_length_s lowerCamelCase : Optional[Any] = ffmpeg_microphone(lowerCamelCase, lowerCamelCase, format_for_conversion=lowerCamelCase ) if format_for_conversion == "s16le": lowerCamelCase : Optional[int] = np.intaa lowerCamelCase : Optional[Any] = 2 elif format_for_conversion == "f32le": lowerCamelCase : int = np.floataa lowerCamelCase : Any = 4 else: raise ValueError(F'''Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`''' ) if stride_length_s is None: lowerCamelCase : Any = chunk_length_s / 6 lowerCamelCase : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(lowerCamelCase, (int, float) ): lowerCamelCase : Optional[int] = [stride_length_s, stride_length_s] lowerCamelCase : Any = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample lowerCamelCase : Optional[int] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample lowerCamelCase : List[Any] = datetime.datetime.now() lowerCamelCase : List[Any] = datetime.timedelta(seconds=lowerCamelCase ) for item in chunk_bytes_iter(lowerCamelCase, lowerCamelCase, stride=(stride_left, stride_right), stream=lowerCamelCase ): # Put everything back in numpy scale lowerCamelCase : Dict = np.frombuffer(item["""raw"""], dtype=lowerCamelCase ) lowerCamelCase : List[Any] = ( item["""stride"""][0] // size_of_sample, item["""stride"""][1] // size_of_sample, ) lowerCamelCase : Tuple = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! SKIP continue yield item def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = False ): lowerCamelCase : Optional[int] = B"""""" lowerCamelCase , lowerCamelCase : str = stride if stride_left + stride_right >= chunk_len: raise ValueError( F'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' ) lowerCamelCase : str = 0 for raw in iterator: acc += raw if stream and len(lowerCamelCase ) < chunk_len: lowerCamelCase : Optional[int] = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(lowerCamelCase ) >= chunk_len: # We are flushing the accumulator lowerCamelCase : str = (_stride_left, stride_right) lowerCamelCase : Dict = {"""raw""": acc[:chunk_len], """stride""": stride} if stream: lowerCamelCase : Optional[int] = False yield item lowerCamelCase : str = stride_left lowerCamelCase : Tuple = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(lowerCamelCase ) > stride_left: lowerCamelCase : List[str] = {"""raw""": acc, """stride""": (_stride_left, 0)} if stream: lowerCamelCase : List[Any] = False yield item def _a ( lowerCamelCase, lowerCamelCase ): lowerCamelCase : Optional[int] = 2**24 # 16Mo try: with subprocess.Popen(lowerCamelCase, stdout=subprocess.PIPE, bufsize=lowerCamelCase ) as ffmpeg_process: while True: lowerCamelCase : Any = ffmpeg_process.stdout.read(lowerCamelCase ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError("""ffmpeg was not found but is required to stream audio files from filename""" ) from error
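# --- Added example (not part of the original module) ---
# A minimal sketch of streaming microphone audio into a speech pipeline,
# assuming the helpers above are the ones exported from
# transformers.pipelines.audio_utils and that ffmpeg is installed on PATH.
# The "openai/whisper-tiny.en" checkpoint is an assumption for illustration.
from transformers import pipeline
from transformers.pipelines.audio_utils import ffmpeg_microphone_live

asr = pipeline("automatic-speech-recognition", model="openai/whisper-tiny.en")
stream = ffmpeg_microphone_live(
    sampling_rate=asr.feature_extractor.sampling_rate,
    chunk_length_s=5.0,
    stream_chunk_s=1.0,  # yield partial chunks every second
)
for item in asr(stream):
    print(item["text"])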
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case_ = logging.get_logger(__name__) snake_case_ = { """studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""", """studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""", } class a__ ( __SCREAMING_SNAKE_CASE ): __magic_name__ : Dict = """luke""" def __init__(self : Tuple, __UpperCAmelCase : Dict=50267, __UpperCAmelCase : str=500000, __UpperCAmelCase : Optional[int]=768, __UpperCAmelCase : List[str]=256, __UpperCAmelCase : Union[str, Any]=12, __UpperCAmelCase : Any=12, __UpperCAmelCase : Optional[Any]=3072, __UpperCAmelCase : Any="gelu", __UpperCAmelCase : int=0.1, __UpperCAmelCase : Optional[Any]=0.1, __UpperCAmelCase : str=512, __UpperCAmelCase : int=2, __UpperCAmelCase : Optional[Any]=0.02, __UpperCAmelCase : Union[str, Any]=1e-12, __UpperCAmelCase : Dict=True, __UpperCAmelCase : Any=None, __UpperCAmelCase : Union[str, Any]=1, __UpperCAmelCase : Optional[int]=0, __UpperCAmelCase : Optional[int]=2, **__UpperCAmelCase : int, ) -> Optional[Any]: """simple docstring""" super().__init__(pad_token_id=__UpperCAmelCase, bos_token_id=__UpperCAmelCase, eos_token_id=__UpperCAmelCase, **__UpperCAmelCase ) SCREAMING_SNAKE_CASE : Optional[int] = vocab_size SCREAMING_SNAKE_CASE : List[str] = entity_vocab_size SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size SCREAMING_SNAKE_CASE : str = entity_emb_size SCREAMING_SNAKE_CASE : str = num_hidden_layers SCREAMING_SNAKE_CASE : Any = num_attention_heads SCREAMING_SNAKE_CASE : List[str] = hidden_act SCREAMING_SNAKE_CASE : Tuple = intermediate_size SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Any = max_position_embeddings SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size SCREAMING_SNAKE_CASE : str = initializer_range SCREAMING_SNAKE_CASE : Optional[Any] = layer_norm_eps SCREAMING_SNAKE_CASE : str = use_entity_aware_attention SCREAMING_SNAKE_CASE : int = classifier_dropout
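# --- Added example (not part of the original file) ---
# A minimal sketch showing how the configuration above is instantiated; the
# values passed below are simply its documented defaults.
from transformers import LukeConfig, LukeModel

config = LukeConfig(vocab_size=50267, entity_vocab_size=500000, hidden_size=768)
model = LukeModel(config)  # randomly initialized weights, no pretrained download
print(config.use_entity_aware_attention)  # True by default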
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , ) @pytest.mark.usefixtures("""sm_env""") @parameterized_class( [ { """framework""": """pytorch""", """script""": """run_glue_model_parallelism.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, { """framework""": """pytorch""", """script""": """run_glue.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, ]) class A__ ( unittest.TestCase): def UpperCamelCase__ ( self ): if self.framework == "pytorch": subprocess.run( F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="""utf-8""" , check=__magic_name__ , ) assert hasattr(self , """env""" ) def UpperCamelCase__ ( self , __magic_name__ ): # configuration for running training on smdistributed Model Parallel lowerCamelCase : Any = { """enabled""": True, """processes_per_host""": 8, } lowerCamelCase : Any = { """enabled""": True, """parameters""": { """microbatches""": 4, """placement_strategy""": """spread""", """pipeline""": """interleaved""", """optimize""": """speed""", """partitions""": 4, """ddp""": True, }, } lowerCamelCase : Optional[Any] = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options} lowerCamelCase : Dict = """trainer""" if self.script == """run_glue.py""" else """smtrainer""" # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=__magic_name__ , instance_type=self.instance_type , debugger_hook_config=__magic_name__ , hyperparameters={ **self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path, """max_steps""": 5_0_0, } , metric_definitions=self.env.metric_definitions , distribution=__magic_name__ , py_version="""py36""" , ) def UpperCamelCase__ ( self , __magic_name__ ): TrainingJobAnalytics(__magic_name__ ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' ) @parameterized.expand([(1,)] ) def UpperCamelCase__ ( self , __magic_name__ ): # create estimator lowerCamelCase : int = self.create_estimator(__magic_name__ ) # run training estimator.fit() # result dataframe lowerCamelCase : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis lowerCamelCase : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) lowerCamelCase : int = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping lowerCamelCase : int = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 
9_9_9_9_9_9 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(F'''{estimator.latest_training_job.name}.json''' , """w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , __magic_name__ )
"""simple docstring""" import argparse import os import torch from transformers.utils import WEIGHTS_NAME UpperCAmelCase_ : str = ['''small''', '''medium''', '''large'''] UpperCAmelCase_ : Dict = '''lm_head.decoder.weight''' UpperCAmelCase_ : List[Any] = '''lm_head.weight''' def _lowerCAmelCase(a : Dict , a : List[Any] ) -> Optional[Any]: _SCREAMING_SNAKE_CASE =torch.load(a ) _SCREAMING_SNAKE_CASE =d.pop(a ) os.makedirs(a , exist_ok=a ) torch.save(a , os.path.join(a , a ) ) if __name__ == "__main__": UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser() parser.add_argument('''--dialogpt_path''', default='''.''', type=str) UpperCAmelCase_ : Any = parser.parse_args() for MODEL in DIALOGPT_MODELS: UpperCAmelCase_ : int = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl") UpperCAmelCase_ : int = f"./DialoGPT-{MODEL}" convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
from __future__ import annotations


def is_palindrome(n: int | str) -> bool:
    s = str(n)
    return s == s[::-1]


def solution(limit: int = 1_000_000) -> int:
    # Project Euler 36: sum the numbers below `limit` that are palindromic
    # in both base 10 and base 2.
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
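# --- Added worked check (not part of the original file) ---
# 585 reads the same both ways in base 10 and in base 2 (1001001001), so it is
# one of the numbers solution() counts; the known answer for the default
# limit of 1_000_000 is 872187.
assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])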
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional from packaging import version if TYPE_CHECKING: from ... import PreTrainedTokenizer, TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import is_torch_available, logging _SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE : Any = { '''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/resolve/main/config.json''', '''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/config.json''', '''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json''', '''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json''', '''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/config.json''', '''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json''', } class a ( __SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE : List[str] = """bloom""" SCREAMING_SNAKE_CASE : List[Any] = ["""past_key_values"""] SCREAMING_SNAKE_CASE : List[str] = { """num_hidden_layers""": """n_layer""", """num_attention_heads""": """n_head""", } def __init__( self : str , __SCREAMING_SNAKE_CASE : Optional[int]=250880 , __SCREAMING_SNAKE_CASE : Optional[Any]=64 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : Dict=8 , __SCREAMING_SNAKE_CASE : Optional[Any]=1e-5 , __SCREAMING_SNAKE_CASE : int=0.02 , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : int=1 , __SCREAMING_SNAKE_CASE : Optional[Any]=2 , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : Any=0.0 , __SCREAMING_SNAKE_CASE : Tuple=0.0 , __SCREAMING_SNAKE_CASE : Any=1 , __SCREAMING_SNAKE_CASE : Optional[Any]=False , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ) -> List[Any]: lowerCamelCase_ = vocab_size # Backward compatibility with n_embed kwarg lowerCamelCase_ = kwargs.pop('n_embed' , __SCREAMING_SNAKE_CASE ) lowerCamelCase_ = hidden_size if n_embed is None else n_embed lowerCamelCase_ = n_layer lowerCamelCase_ = n_head lowerCamelCase_ = layer_norm_epsilon lowerCamelCase_ = initializer_range lowerCamelCase_ = use_cache lowerCamelCase_ = pretraining_tp lowerCamelCase_ = apply_residual_connection_post_layernorm lowerCamelCase_ = hidden_dropout lowerCamelCase_ = attention_dropout lowerCamelCase_ = bos_token_id lowerCamelCase_ = eos_token_id lowerCamelCase_ = slow_but_exact super().__init__(bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) class a ( __SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE : Union[str, Any] = version.parse("""1.12""" ) def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple = "default" , __SCREAMING_SNAKE_CASE : Union[str, Any] = None , __SCREAMING_SNAKE_CASE : Tuple = False , ) -> List[str]: super().__init__(__SCREAMING_SNAKE_CASE , task=__SCREAMING_SNAKE_CASE , patching_specs=__SCREAMING_SNAKE_CASE , use_past=__SCREAMING_SNAKE_CASE ) if not getattr(self._config , 'pad_token_id' , __SCREAMING_SNAKE_CASE ): # TODO: how to do that better? lowerCamelCase_ = 0 @property def UpperCamelCase ( self : List[str] ) -> Any: lowerCamelCase_ = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: # BLOOM stores values on dynamic axis 2. 
For more details see: https://github.com/huggingface/transformers/pull/18344 self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE , direction='inputs' , inverted_values_shape=__SCREAMING_SNAKE_CASE ) lowerCamelCase_ = {0: """batch""", 1: """past_sequence + sequence"""} else: lowerCamelCase_ = {0: """batch""", 1: """sequence"""} return common_inputs @property def UpperCamelCase ( self : Any ) -> Any: return self._config.n_layer @property def UpperCamelCase ( self : List[str] ) -> int: return self._config.n_head @property def UpperCamelCase ( self : str ) -> Optional[int]: return 1e-3 def UpperCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] = -1 , __SCREAMING_SNAKE_CASE : Optional[int] = -1 , __SCREAMING_SNAKE_CASE : Optional[int] = False , __SCREAMING_SNAKE_CASE : int = None , ) -> Optional[int]: lowerCamelCase_ = super(__SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs( __SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE ) # We need to order the input in the way they appears in the forward() lowerCamelCase_ = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch lowerCamelCase_ = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values lowerCamelCase_ = seqlen + 2 lowerCamelCase_ = self._config.hidden_size // self.num_attention_heads lowerCamelCase_ = ( batch * self.num_attention_heads, head_dim, past_key_values_length, ) lowerCamelCase_ = ( batch * self.num_attention_heads, past_key_values_length, head_dim, ) lowerCamelCase_ = [ (torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers ) ] lowerCamelCase_ = common_inputs["""attention_mask"""] if self.use_past: lowerCamelCase_ = ordered_inputs["""attention_mask"""].dtype lowerCamelCase_ = torch.cat( [ordered_inputs['attention_mask'], torch.ones(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )] , dim=1 ) return ordered_inputs @property def UpperCamelCase ( self : Tuple ) -> Optional[Any]: return 13
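# --- Added example (not part of the original file) ---
# A minimal sketch of the configuration above, including the attribute_map
# aliases; the sizes below are illustrative, not a released checkpoint.
from transformers import BloomConfig

config = BloomConfig(vocab_size=250880, hidden_size=64, n_layer=2, n_head=8)
# attribute_map resolves the generic names to n_layer / n_head:
print(config.num_hidden_layers, config.num_attention_heads)  # -> 2 8
# "n_embed" is still accepted as a backward-compatible alias for hidden_size:
assert BloomConfig(n_embed=128).hidden_size == 128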
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def _a ( lowerCamelCase, lowerCamelCase=False ): lowerCamelCase : Dict = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ("""module.cls_token""", """vit.embeddings.cls_token"""), ("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""module.pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""module.norm.weight""", """layernorm.weight"""), ("""module.norm.bias""", """layernorm.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" lowerCamelCase : Any = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase=False ): for i in range(config.num_hidden_layers ): if base_model: lowerCamelCase : Optional[Any] = """""" else: lowerCamelCase : Optional[int] = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCamelCase : Dict = state_dict.pop(F'''module.blocks.{i}.attn.qkv.weight''' ) lowerCamelCase : List[str] = state_dict.pop(F'''module.blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict lowerCamelCase : Union[str, Any] = in_proj_weight[ : config.hidden_size, : ] lowerCamelCase : Optional[int] = in_proj_bias[: config.hidden_size] lowerCamelCase : Optional[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCamelCase : List[str] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] 
lowerCamelCase : Union[str, Any] = in_proj_weight[ -config.hidden_size :, : ] lowerCamelCase : Any = in_proj_bias[-config.hidden_size :] def _a ( lowerCamelCase ): lowerCamelCase : Tuple = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(lowerCamelCase, lowerCamelCase ) def _a ( lowerCamelCase ): # projection head is used in the self-supervised pre-training in MSN, # for downstream task it's not needed. lowerCamelCase : Any = [ """module.fc.fc1.weight""", """module.fc.fc1.bias""", """module.fc.bn1.weight""", """module.fc.bn1.bias""", """module.fc.bn1.running_mean""", """module.fc.bn1.running_var""", """module.fc.bn1.num_batches_tracked""", """module.fc.fc2.weight""", """module.fc.fc2.bias""", """module.fc.bn2.weight""", """module.fc.bn2.bias""", """module.fc.bn2.running_mean""", """module.fc.bn2.running_var""", """module.fc.bn2.num_batches_tracked""", """module.fc.fc3.weight""", """module.fc.fc3.bias""", ] for k in ignore_keys: state_dict.pop(lowerCamelCase, lowerCamelCase ) def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ): lowerCamelCase : Dict = dct.pop(lowerCamelCase ) lowerCamelCase : str = val def _a ( lowerCamelCase, lowerCamelCase ): lowerCamelCase : Any = ViTMSNConfig() lowerCamelCase : Tuple = 1000 lowerCamelCase : List[Any] = """datasets/huggingface/label-files""" lowerCamelCase : Optional[Any] = """imagenet-1k-id2label.json""" lowerCamelCase : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase, lowerCamelCase ), """r""" ) ) lowerCamelCase : List[Any] = {int(lowerCamelCase ): v for k, v in idalabel.items()} lowerCamelCase : Optional[int] = idalabel lowerCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: lowerCamelCase : int = 384 lowerCamelCase : Optional[int] = 1536 lowerCamelCase : Tuple = 6 elif "l16" in checkpoint_url: lowerCamelCase : Dict = 1024 lowerCamelCase : List[Any] = 4096 lowerCamelCase : Optional[int] = 24 lowerCamelCase : str = 16 lowerCamelCase : str = 0.1 elif "b4" in checkpoint_url: lowerCamelCase : Union[str, Any] = 4 elif "l7" in checkpoint_url: lowerCamelCase : Tuple = 7 lowerCamelCase : Optional[int] = 1024 lowerCamelCase : List[Any] = 4096 lowerCamelCase : Tuple = 24 lowerCamelCase : Dict = 16 lowerCamelCase : str = 0.1 lowerCamelCase : List[Any] = ViTMSNModel(lowerCamelCase ) lowerCamelCase : Dict = torch.hub.load_state_dict_from_url(lowerCamelCase, map_location="""cpu""" )["""target_encoder"""] lowerCamelCase : Any = ViTImageProcessor(size=config.image_size ) remove_projection_head(lowerCamelCase ) lowerCamelCase : Dict = create_rename_keys(lowerCamelCase, base_model=lowerCamelCase ) for src, dest in rename_keys: rename_key(lowerCamelCase, lowerCamelCase, lowerCamelCase ) read_in_q_k_v(lowerCamelCase, lowerCamelCase, base_model=lowerCamelCase ) model.load_state_dict(lowerCamelCase ) model.eval() lowerCamelCase : Tuple = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase : Dict = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase ).raw ) lowerCamelCase : Union[str, Any] = ViTImageProcessor( size=config.image_size, image_mean=lowerCamelCase, image_std=lowerCamelCase ) lowerCamelCase : Tuple = image_processor(images=lowerCamelCase, return_tensors="""pt""" ) # forward pass torch.manual_seed(2 ) lowerCamelCase : int = model(**lowerCamelCase ) lowerCamelCase : Union[str, Any] = outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # 
https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: lowerCamelCase : Union[str, Any] = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] ) elif "b16" in checkpoint_url: lowerCamelCase : Tuple = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] ) elif "l16" in checkpoint_url: lowerCamelCase : List[str] = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] ) elif "b4" in checkpoint_url: lowerCamelCase : Tuple = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] ) else: lowerCamelCase : List[str] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] ) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3], lowerCamelCase, atol=1e-4 ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCamelCase ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(lowerCamelCase ) if __name__ == "__main__": _lowerCamelCase =argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar""", type=str, help="""URL of the checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) _lowerCamelCase =parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
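# --- Added example (not part of the original script) ---
# A sketch of using a converted checkpoint. "facebook/vit-msn-small" is an
# assumption for illustration; the script above can produce an equivalent
# local directory via --pytorch_dump_folder_path.
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, ViTMSNModel

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
processor = AutoImageProcessor.from_pretrained("facebook/vit-msn-small")
model = ViTMSNModel.from_pretrained("facebook/vit-msn-small")
with torch.no_grad():
    hidden = model(**processor(images=image, return_tensors="pt")).last_hidden_state
print(hidden.shape)  # (1, num_patches + 1, hidden_size)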
"""simple docstring""" from ..utils import DummyObject, requires_backends class __snake_case (metaclass=__SCREAMING_SNAKE_CASE ): __a = ["""torch""", """scipy"""] def __init__( self: str , *A_: List[Any] , **A_: Tuple ): requires_backends(self , ["""torch""", """scipy"""] ) @classmethod def __a ( cls: Dict , *A_: Optional[int] , **A_: Tuple ): requires_backends(cls , ["""torch""", """scipy"""] ) @classmethod def __a ( cls: List[Any] , *A_: Union[str, Any] , **A_: Union[str, Any] ): requires_backends(cls , ["""torch""", """scipy"""] )
def is_palindrome_number(num: int) -> bool:
    """Check whether an integer reads the same backwards, without using strings.

    The original function name was lost to obfuscation; this name is a placeholder.

    >>> is_palindrome_number(121)
    True
    >>> is_palindrome_number(-121)
    False
    """
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring""" from __future__ import annotations import math import numpy as np from numpy.linalg import norm def __lowercase ( _a , _a ): return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(_a , _a ) ) ) def __lowercase ( _a , _a ): if dataset.ndim != value_array.ndim: snake_case_ : int = ( """Wrong input data's dimensions... """ f"dataset : {dataset.ndim}, value_array : {value_array.ndim}" ) raise ValueError(_a ) try: if dataset.shape[1] != value_array.shape[1]: snake_case_ : Any = ( """Wrong input data's shape... """ f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}" ) raise ValueError(_a ) except IndexError: if dataset.ndim != value_array.ndim: raise TypeError('''Wrong shape''' ) if dataset.dtype != value_array.dtype: snake_case_ : Optional[Any] = ( """Input data have different datatype... """ f"dataset : {dataset.dtype}, value_array : {value_array.dtype}" ) raise TypeError(_a ) snake_case_ : str = [] for value in value_array: snake_case_ : str = euclidean(_a , dataset[0] ) snake_case_ : Tuple = dataset[0].tolist() for dataset_value in dataset[1:]: snake_case_ : Optional[int] = euclidean(_a , _a ) if dist > temp_dist: snake_case_ : List[Any] = temp_dist snake_case_ : str = dataset_value.tolist() answer.append([vector, dist] ) return answer def __lowercase ( _a , _a ): return np.dot(_a , _a ) / (norm(_a ) * norm(_a )) if __name__ == "__main__": import doctest doctest.testmod()
from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_torch_available from ...utils import OptionalDependencyNotAvailable _lowerCamelCase ={ """configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""], """tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase =[ """GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""", """GPTNeoXJapaneseForCausalLM""", """GPTNeoXJapaneseLayer""", """GPTNeoXJapaneseModel""", """GPTNeoXJapanesePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox_japanese import ( GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseLayer, GPTNeoXJapaneseModel, GPTNeoXJapanesePreTrainedModel, ) else: import sys _lowerCamelCase =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n: int, prec: int = 1000) -> bool:
    """Probabilistic (Miller-Rabin style) primality test using `prec` random rounds."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # n is odd: write n - 1 = d * 2**exp with d odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
import copy import random from transformers import CLIPTokenizer class A__ ( __SCREAMING_SNAKE_CASE): def __init__( self , *__magic_name__ , **__magic_name__ ): super().__init__(*__magic_name__ , **__magic_name__ ) lowerCamelCase : Dict = {} def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , **__magic_name__ ): lowerCamelCase : Any = super().add_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ ) if num_added_tokens == 0: raise ValueError( F'''The tokenizer already contains the token {placeholder_token}. Please pass a different''' """ `placeholder_token` that is not already in the tokenizer.""" ) def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , __magic_name__=1 , **__magic_name__ ): lowerCamelCase : List[Any] = [] if num_vec_per_token == 1: self.try_adding_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ ) output.append(__magic_name__ ) else: lowerCamelCase : Dict = [] for i in range(__magic_name__ ): lowerCamelCase : Optional[Any] = placeholder_token + F'''_{i}''' self.try_adding_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ ) output.append(__magic_name__ ) # handle cases where there is a new placeholder token that contains the current placeholder token but is larger for token in self.token_map: if token in placeholder_token: raise ValueError( F'''The tokenizer already has placeholder token {token} that can get confused with''' F''' {placeholder_token}keep placeholder tokens independent''' ) lowerCamelCase : Any = output def UpperCamelCase__ ( self , __magic_name__ , __magic_name__=False , __magic_name__=1.0 ): if isinstance(__magic_name__ , __magic_name__ ): lowerCamelCase : List[str] = [] for i in range(len(__magic_name__ ) ): output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=__magic_name__ ) ) return output for placeholder_token in self.token_map: if placeholder_token in text: lowerCamelCase : List[str] = self.token_map[placeholder_token] lowerCamelCase : Optional[Any] = tokens[: 1 + int(len(__magic_name__ ) * prop_tokens_to_load )] if vector_shuffle: lowerCamelCase : Union[str, Any] = copy.copy(__magic_name__ ) random.shuffle(__magic_name__ ) lowerCamelCase : str = text.replace(__magic_name__ , """ """.join(__magic_name__ ) ) return text def __call__( self , __magic_name__ , *__magic_name__ , __magic_name__=False , __magic_name__=1.0 , **__magic_name__ ): return super().__call__( self.replace_placeholder_tokens_in_text( __magic_name__ , vector_shuffle=__magic_name__ , prop_tokens_to_load=__magic_name__ ) , *__magic_name__ , **__magic_name__ , ) def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , __magic_name__=False , __magic_name__=1.0 , **__magic_name__ ): return super().encode( self.replace_placeholder_tokens_in_text( __magic_name__ , vector_shuffle=__magic_name__ , prop_tokens_to_load=__magic_name__ ) , *__magic_name__ , **__magic_name__ , )
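# --- Added example (not part of the original module) ---
# A small sketch of the underlying mechanism: a placeholder concept token is
# expanded into several sub-tokens ("<cat-toy>_0", "<cat-toy>_1", ...) added to
# the vocabulary, which is what the class above automates. The checkpoint name
# is an assumption for illustration.
from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
pieces = [f"<cat-toy>_{i}" for i in range(3)]
assert tokenizer.add_tokens(pieces) == 3  # all three are genuinely new tokens
text = "a photo of " + " ".join(pieces)   # what the class substitutes into prompts
print(tokenizer(text)["input_ids"])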
import argparse import intel_extension_for_pytorch as ipex import torch from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline a_ = argparse.ArgumentParser('Stable Diffusion script with intel optimization', add_help=False) parser.add_argument('--dpm', action='store_true', help='Enable DPMSolver or not') parser.add_argument('--steps', default=None, type=int, help='Num inference steps') a_ = parser.parse_args() a_ = 'cpu' a_ = 'a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings' a_ = 'path-to-your-trained-model' a_ = StableDiffusionPipeline.from_pretrained(model_id) if args.dpm: a_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) a_ = pipe.to(device) # to channels last a_ = pipe.unet.to(memory_format=torch.channels_last) a_ = pipe.vae.to(memory_format=torch.channels_last) a_ = pipe.text_encoder.to(memory_format=torch.channels_last) if pipe.requires_safety_checker: a_ = pipe.safety_checker.to(memory_format=torch.channels_last) # optimize with ipex a_ = torch.randn(2, 4, 64, 64) a_ = torch.rand(1) * 999 a_ = torch.randn(2, 77, 768) a_ = (sample, timestep, encoder_hidden_status) try: a_ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example) except Exception: a_ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True) a_ = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True) a_ = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True) if pipe.requires_safety_checker: a_ = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True) # compute a_ = 666 a_ = torch.Generator(device).manual_seed(seed) a_ = {'generator': generator} if args.steps is not None: a_ = args.steps with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa): a_ = pipe(prompt, **generate_kwargs).images[0] # save image image.save('generated.png')
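# --- Added example (not part of the original script) ---
# A toy sketch of the memory-format + autocast pattern the script above relies
# on, runnable without intel_extension_for_pytorch: convert module and input to
# channels_last, then run under CPU bfloat16 autocast.
import torch
import torch.nn as nn

conv = nn.Conv2d(3, 8, kernel_size=3).eval()
conv = conv.to(memory_format=torch.channels_last)  # same layout trick as above
x = torch.randn(1, 3, 64, 64).to(memory_format=torch.channels_last)
with torch.no_grad(), torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    y = conv(x)
print(y.dtype, y.is_contiguous(memory_format=torch.channels_last))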
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class A__ ( unittest.TestCase): def __init__( self , __magic_name__ , __magic_name__=7 , __magic_name__=3 , __magic_name__=1_8 , __magic_name__=3_0 , __magic_name__=4_0_0 , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=[0.48_145_466, 0.4_578_275, 0.40_821_073] , __magic_name__=[0.26_862_954, 0.26_130_258, 0.27_577_711] , __magic_name__=True , ): lowerCamelCase : Union[str, Any] = size if size is not None else {"""height""": 2_2_4, """width""": 2_2_4} lowerCamelCase : str = crop_size if crop_size is not None else {"""height""": 1_8, """width""": 1_8} lowerCamelCase : Optional[int] = parent lowerCamelCase : Union[str, Any] = batch_size lowerCamelCase : str = num_channels lowerCamelCase : Any = image_size lowerCamelCase : Optional[int] = min_resolution lowerCamelCase : Union[str, Any] = max_resolution lowerCamelCase : Union[str, Any] = do_resize lowerCamelCase : int = size lowerCamelCase : int = do_center_crop lowerCamelCase : Union[str, Any] = crop_size lowerCamelCase : Union[str, Any] = do_normalize lowerCamelCase : Dict = image_mean lowerCamelCase : Optional[Any] = image_std lowerCamelCase : Union[str, Any] = do_convert_rgb def UpperCamelCase__ ( self ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def UpperCamelCase__ ( self , __magic_name__=False , __magic_name__=False , __magic_name__=False ): assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: lowerCamelCase : Tuple = [] for i in range(self.batch_size ): image_inputs.append( np.random.randint( 2_5_5 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) ) else: lowerCamelCase : Dict = [] for i in range(self.batch_size ): lowerCamelCase , lowerCamelCase : int = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 ) image_inputs.append(np.random.randint(2_5_5 , size=(self.num_channels, width, height) , dtype=np.uinta ) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension lowerCamelCase : int = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs] if torchify: lowerCamelCase : int = [torch.from_numpy(__magic_name__ ) for x in image_inputs] return image_inputs @require_torch @require_vision class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase): _UpperCAmelCase : Any = ChineseCLIPImageProcessor if is_vision_available() else None def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = ChineseCLIPImageProcessingTester(self , do_center_crop=__magic_name__ ) @property def UpperCamelCase__ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__magic_name__ , """do_resize""" ) ) self.assertTrue(hasattr(__magic_name__ , 
"""size""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_center_crop""" ) ) self.assertTrue(hasattr(__magic_name__ , """center_crop""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_normalize""" ) ) self.assertTrue(hasattr(__magic_name__ , """image_mean""" ) ) self.assertTrue(hasattr(__magic_name__ , """image_std""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_convert_rgb""" ) ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 2_2_4, """width""": 2_2_4} ) self.assertEqual(image_processor.crop_size , {"""height""": 1_8, """width""": 1_8} ) lowerCamelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2} ) self.assertEqual(image_processor.crop_size , {"""height""": 8_4, """width""": 8_4} ) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): # Initialize image_processing lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , Image.Image ) # Test not batched input lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCamelCase : Optional[Any] = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def UpperCamelCase__ ( self ): # Initialize image_processing lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ , numpify=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , np.ndarray ) # Test not batched input lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCamelCase : Tuple = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def UpperCamelCase__ ( self ): # Initialize image_processing lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase : Any = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ , torchify=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , torch.Tensor ) # Test not batched input lowerCamelCase : Optional[int] = 
image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCamelCase : str = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) @require_torch @require_vision class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase): _UpperCAmelCase : Tuple = ChineseCLIPImageProcessor if is_vision_available() else None def UpperCamelCase__ ( self ): lowerCamelCase : Union[str, Any] = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=__magic_name__ ) lowerCamelCase : Any = 3 @property def UpperCamelCase__ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase__ ( self ): lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__magic_name__ , """do_resize""" ) ) self.assertTrue(hasattr(__magic_name__ , """size""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_center_crop""" ) ) self.assertTrue(hasattr(__magic_name__ , """center_crop""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_normalize""" ) ) self.assertTrue(hasattr(__magic_name__ , """image_mean""" ) ) self.assertTrue(hasattr(__magic_name__ , """image_std""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_convert_rgb""" ) ) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): # Initialize image_processing lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , Image.Image ) # Test not batched input lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCamelCase : Optional[Any] = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
'''simple docstring''' import comet # From: unbabel-comet import torch import datasets snake_case_ : Tuple = datasets.logging.get_logger(__name__) snake_case_ : Dict = '''\ @inproceedings{rei-EtAl:2020:WMT, author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon}, title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task}, booktitle = {Proceedings of the Fifth Conference on Machine Translation}, month = {November}, year = {2020}, address = {Online}, publisher = {Association for Computational Linguistics}, pages = {909--918}, } @inproceedings{rei-etal-2020-comet, title = \"{COMET}: A Neural Framework for {MT} Evaluation\", author = \"Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon\", booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\", month = nov, year = \"2020\", address = \"Online\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\", pages = \"2685--2702\", } ''' snake_case_ : int = '''\ Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM). With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition. See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information. ''' snake_case_ : Dict = ''' COMET score. Args: `sources` (list of str): Source sentences `predictions` (list of str): candidate translations `references` (list of str): reference translations `cuda` (bool): If set to True, runs COMET using GPU `show_progress` (bool): Shows progress `model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None. Returns: `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`. `scores`: List of scores. 
Examples: >>> comet_metric = datasets.load_metric(\'comet\') >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"] >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"] >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"] >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source) >>> print([round(v, 2) for v in results[\"scores\"]]) [0.19, 0.92] ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A_ ( datasets.Metric ): '''simple docstring''' def a ( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="https://unbabel.github.io/COMET/html/index.html" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "sources": datasets.Value("string" , id="sequence" ), "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/Unbabel/COMET"] , reference_urls=[ "https://github.com/Unbabel/COMET", "https://www.aclweb.org/anthology/2020.emnlp-main.213/", "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6", ] , ) def a ( self , A_ ): if self.config_name == "default": _UpperCamelCase = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da" ) ) else: _UpperCamelCase = comet.load_from_checkpoint(comet.download_model(self.config_name ) ) def a ( self , A_ , A_ , A_ , A_=None , A_=False ): if gpus is None: _UpperCamelCase = 1 if torch.cuda.is_available() else 0 _UpperCamelCase = {"""src""": sources, """mt""": predictions, """ref""": references} _UpperCamelCase = [dict(zip(A_ , A_ ) ) for t in zip(*data.values() )] _UpperCamelCase = self.scorer.predict(A_ , gpus=A_ , progress_bar=A_ ) return {"mean_score": mean_score, "scores": scores}
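# --- Added example (not part of the original module) ---
# A minimal sketch of calling the metric defined above through the datasets
# API (requires the unbabel-comet package; the model downloads on first load).
import datasets

comet_metric = datasets.load_metric("comet")  # default config
results = comet_metric.compute(
    sources=["Dem Feuer konnte Einhalt geboten werden"],
    predictions=["The fire could be stopped"],
    references=["They were able to control the fire."],
)
print(round(results["mean_score"], 2), results["scores"])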
from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A__ : def __init__( self , __magic_name__ , __magic_name__=3 , __magic_name__=3_2 , __magic_name__=3 , __magic_name__=1_0 , __magic_name__=[1_0, 2_0, 3_0, 4_0] , __magic_name__=[1, 1, 2, 1] , __magic_name__=True , __magic_name__=True , __magic_name__="relu" , __magic_name__=3 , __magic_name__=None , ): lowerCamelCase : Tuple = parent lowerCamelCase : Tuple = batch_size lowerCamelCase : List[Any] = image_size lowerCamelCase : Optional[Any] = num_channels lowerCamelCase : Dict = embeddings_size lowerCamelCase : Optional[int] = hidden_sizes lowerCamelCase : Union[str, Any] = depths lowerCamelCase : Optional[Any] = is_training lowerCamelCase : Union[str, Any] = use_labels lowerCamelCase : Dict = hidden_act lowerCamelCase : Any = num_labels lowerCamelCase : int = scope lowerCamelCase : Optional[Any] = len(__magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase : Tuple = None if self.use_labels: lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels ) lowerCamelCase : Tuple = self.get_config() return config, pixel_values, labels def UpperCamelCase__ ( self ): return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : Dict = TFResNetModel(config=__magic_name__ ) lowerCamelCase : Tuple = model(__magic_name__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : str = self.num_labels lowerCamelCase : Dict = TFResNetForImageClassification(__magic_name__ ) lowerCamelCase : Union[str, Any] = model(__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = config_and_inputs lowerCamelCase : List[str] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase): _UpperCAmelCase : Any = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () _UpperCAmelCase : List[str] = ( {"""feature-extraction""": TFResNetModel, 
"""image-classification""": TFResNetForImageClassification} if is_tf_available() else {} ) _UpperCAmelCase : Optional[Any] = False _UpperCAmelCase : Optional[Any] = False _UpperCAmelCase : Dict = False _UpperCAmelCase : List[Any] = False _UpperCAmelCase : Any = False def UpperCamelCase__ ( self ): lowerCamelCase : int = TFResNetModelTester(self ) lowerCamelCase : str = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ ) def UpperCamelCase__ ( self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase__ ( self ): return @unittest.skip(reason="""ResNet does not use inputs_embeds""" ) def UpperCamelCase__ ( self ): pass @unittest.skip(reason="""ResNet does not support input and output embeddings""" ) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : List[str] = model_class(__magic_name__ ) lowerCamelCase : str = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase : Tuple = [*signature.parameters.keys()] lowerCamelCase : List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCamelCase__ ( self ): def check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : Any = model_class(__magic_name__ ) lowerCamelCase : List[Any] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) lowerCamelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCamelCase : Union[str, Any] = self.model_tester.num_stages self.assertEqual(len(__magic_name__ ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : Tuple = ["""basic""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: lowerCamelCase : Union[str, Any] = layer_type lowerCamelCase : str = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase : int = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__magic_name__ ) @slow def UpperCamelCase__ ( self ): for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : Any = TFResNetModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def _a ( ): lowerCamelCase : 
Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class A__ ( unittest.TestCase): @cached_property def UpperCamelCase__ ( self ): return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) lowerCamelCase : List[str] = self.default_image_processor lowerCamelCase : str = prepare_img() lowerCamelCase : Tuple = image_processor(images=__magic_name__ , return_tensors="""tf""" ) # forward pass lowerCamelCase : Tuple = model(**__magic_name__ ) # verify the logits lowerCamelCase : Optional[Any] = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , __magic_name__ ) lowerCamelCase : Optional[Any] = tf.constant([-11.1_069, -9.7_877, -8.3_777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __magic_name__ , atol=1e-4 ) )
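# Hedged standalone sketch of the inference path the integration test above
# exercises. The checkpoint id "microsoft/resnet-50" is an assumption (the test
# resolves it from TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]); requires
# tensorflow, transformers, PIL, and network access.
import tensorflow as tf
from PIL import Image
from transformers import AutoImageProcessor, TFResNetForImageClassification

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")  # assumed checkpoint
model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")  # assumed checkpoint
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits  # shape (1, 1000) for the ImageNet-1k head
print(int(tf.math.argmax(logits, axis=-1)[0]))  # predicted ImageNet class id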
"""simple docstring""" import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPTaConfig, LayoutLMConfig, LxmertConfig, OpenAIGPTConfig, RobertaConfig, TaConfig, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPTaLMHeadModel, TFLayoutLMForMaskedLM, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFTaForConditionalGeneration, TFTransfoXLLMHeadModel, TFWavaVecaModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, WavaVecaConfig, WavaVecaModel, XLMConfig, XLMRobertaConfig, XLNetConfig, is_torch_available, load_pytorch_checkpoint_in_tfa_model, ) from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging if is_torch_available(): import numpy as np import torch from . 
import ( AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining, FlaubertWithLMHeadModel, GPTaLMHeadModel, LayoutLMForMaskedLM, LxmertForPreTraining, LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification, TaForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel, XLNetLMHeadModel, ) logging.set_verbosity_info() A_ : Any = { 'bart': ( BartConfig, TFBartForConditionalGeneration, TFBartForSequenceClassification, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), 'bert': ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'bert-base-cased-finetuned-mrpc': ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'dpr': ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), 'gpt2': ( GPTaConfig, TFGPTaLMHeadModel, GPTaLMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'xlnet': ( XLNetConfig, TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'xlm': ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'xlm-roberta': ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'transfo-xl': ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'openai-gpt': ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'roberta': ( RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'layoutlm': ( LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, ), 'roberta-large-mnli': ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'camembert': ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'flaubert': ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'distilbert': ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'distilbert-base-distilled-squad': ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'lxmert': ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'lxmert-visual-feature-encoder': ( LxmertConfig, TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'ctrl': ( 
CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'albert': ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 't5': ( TaConfig, TFTaForConditionalGeneration, TaForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'electra': ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 'wav2vec2': ( WavaVecaConfig, TFWavaVecaModel, WavaVecaModel, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def __snake_case ( __A : Union[str, Any] , __A : List[str] , __A : Optional[int] , __A : List[str] , __A : int=False , __A : Union[str, Any]=True ) -> Tuple: '''simple docstring''' if model_type not in MODEL_CLASSES: raise ValueError(F"""Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: SCREAMING_SNAKE_CASE : int = cached_file(__A , __A , force_download=not use_cached_models ) SCREAMING_SNAKE_CASE : Optional[Any] = config_class.from_json_file(__A ) SCREAMING_SNAKE_CASE : List[Any] = True SCREAMING_SNAKE_CASE : Tuple = True print(F"""Building TensorFlow model from configuration: {config}""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(__A ) # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): SCREAMING_SNAKE_CASE : Optional[Any] = cached_file( __A , __A , force_download=not use_cached_models ) # Load PyTorch checkpoint in tf2 model: SCREAMING_SNAKE_CASE : Dict = load_pytorch_checkpoint_in_tfa_model(__A , __A ) if compare_with_pt_model: SCREAMING_SNAKE_CASE : List[str] = tf_model(tf_model.dummy_inputs , training=__A ) # build the network SCREAMING_SNAKE_CASE : Tuple = torch.load(__A , map_location='cpu' ) SCREAMING_SNAKE_CASE : Optional[Any] = pt_model_class.from_pretrained( pretrained_model_name_or_path=__A , config=__A , state_dict=__A ) with torch.no_grad(): SCREAMING_SNAKE_CASE : Optional[Any] = pt_model(**pt_model.dummy_inputs ) SCREAMING_SNAKE_CASE : Optional[int] = pto[0].numpy() SCREAMING_SNAKE_CASE : int = tfo[0].numpy() SCREAMING_SNAKE_CASE : List[str] = np.amax(np.abs(np_pt - np_tf ) ) print(F"""Max absolute difference between models outputs {diff}""" ) assert diff <= 2E-2, F"""Error, model absolute difference is >2e-2: {diff}""" # Save pytorch-model print(F"""Save TensorFlow model to {tf_dump_path}""" ) tf_model.save_weights(__A , save_format='h5' ) def __snake_case ( __A : Dict , __A : Dict , __A : Tuple=None , __A : Optional[int]=None , __A : Any=False , __A : Tuple=False , __A : Union[str, Any]=False , __A : int=False , ) -> Union[str, Any]: '''simple docstring''' if args_model_type is None: SCREAMING_SNAKE_CASE : int = list(MODEL_CLASSES.keys() ) else: SCREAMING_SNAKE_CASE : List[str] = [args_model_type] for j, model_type in enumerate(__A , start=1 ): print('=' * 100 ) print(F""" Converting model type {j}/{len(__A )}: {model_type}""" ) print('=' * 100 ) if model_type not in MODEL_CLASSES: raise ValueError(F"""Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.""" ) SCREAMING_SNAKE_CASE : Tuple = MODEL_CLASSES[model_type] if model_shortcut_names_or_path is None: SCREAMING_SNAKE_CASE : Optional[int] = list(aws_model_maps.keys() ) if config_shortcut_names_or_path is None: SCREAMING_SNAKE_CASE : List[str] = model_shortcut_names_or_path for i, (model_shortcut_name, config_shortcut_name) in enumerate( zip(__A , __A ) , start=1 
): print('-' * 100 ) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not only_convert_finetuned_models: print(F""" Skipping finetuned checkpoint {model_shortcut_name}""" ) continue SCREAMING_SNAKE_CASE : Optional[Any] = model_shortcut_name elif only_convert_finetuned_models: print(F""" Skipping not finetuned checkpoint {model_shortcut_name}""" ) continue print( F""" Converting checkpoint {i}/{len(__A )}: {model_shortcut_name} - model_type {model_type}""" ) print('-' * 100 ) if config_shortcut_name in aws_config_map: SCREAMING_SNAKE_CASE : str = cached_file(__A , __A , force_download=not use_cached_models ) else: SCREAMING_SNAKE_CASE : Dict = config_shortcut_name if model_shortcut_name in aws_model_maps: SCREAMING_SNAKE_CASE : str = cached_file(__A , __A , force_download=not use_cached_models ) else: SCREAMING_SNAKE_CASE : int = model_shortcut_name if os.path.isfile(__A ): SCREAMING_SNAKE_CASE : Optional[Any] = """converted_model""" convert_pt_checkpoint_to_tf( model_type=__A , pytorch_checkpoint_path=__A , config_file=__A , tf_dump_path=os.path.join(__A , model_shortcut_name + '-tf_model.h5' ) , compare_with_pt_model=__A , ) if remove_cached_files: os.remove(__A ) os.remove(__A ) if __name__ == "__main__": A_ : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.' ) parser.add_argument( '--model_type', default=None, type=str, help=( f'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and ''' 'convert all the models from AWS.' ), ) parser.add_argument( '--pytorch_checkpoint_path', default=None, type=str, help=( 'Path to the PyTorch checkpoint path or shortcut name to download from AWS. ' 'If not given, will download and convert all the checkpoints from AWS.' ), ) parser.add_argument( '--config_file', default=None, type=str, help=( 'The config json file corresponding to the pre-trained model. \n' 'This specifies the model architecture. If not given and ' '--pytorch_checkpoint_path is not given or is a shortcut name ' 'use the configuration associated to the shortcut name on the AWS' ), ) parser.add_argument( '--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.' 
) parser.add_argument( '--use_cached_models', action='store_true', help='Use cached models if possible instead of updating to latest checkpoint versions.', ) parser.add_argument( '--remove_cached_files', action='store_true', help='Remove pytorch models after conversion (save memory when converting in batches).', ) parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.') A_ : List[Any] = parser.parse_args() # if args.pytorch_checkpoint_path is not None: # convert_pt_checkpoint_to_tf(args.model_type.lower(), # args.pytorch_checkpoint_path, # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, # args.tf_dump_path, # compare_with_pt_model=args.compare_with_pt_model, # use_cached_models=args.use_cached_models) # else: convert_all_pt_checkpoints_to_tf( args.model_type.lower() if args.model_type is not None else None, args.tf_dump_path, model_shortcut_names_or_path=[args.pytorch_checkpoint_path] if args.pytorch_checkpoint_path is not None else None, config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models, )
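# Hedged example invocation of the conversion script above; the script filename
# and the checkpoint/dump paths are illustrative, not taken from the source.
# python convert_pytorch_checkpoint_to_tf2.py \
#     --model_type bert \
#     --pytorch_checkpoint_path bert-base-cased \
#     --tf_dump_path ./tf_dumps \
#     --compare_with_pt_model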
import argparse

import torch

from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model.\n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
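# Hedged example invocation of the MobileBERT conversion script above; the script
# filename and the paths are illustrative placeholders.
# python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./mobilebert/mobilebert_variables.ckpt \
#     --mobilebert_config_file ./mobilebert/config.json \
#     --pytorch_dump_path ./mobilebert_pytorch_model.bin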
"""simple docstring""" import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def _A( ): with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(lowerCAmelCase ): requests.request("""GET""" , """https://huggingface.co""" ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 ) @pytest.mark.integration def _A( ): with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request("""GET""" , """https://huggingface.co""" ) def _A( ): with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(lowerCAmelCase ): http_head("""https://huggingface.co""" )
import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def _a ( lowerCamelCase ): # vision encoder if "img_encoder.pos_embed" in name: lowerCamelCase : Tuple = name.replace("""img_encoder.pos_embed""", """vision_model.embeddings.position_embeddings""" ) if "img_encoder.patch_embed.proj" in name: lowerCamelCase : Union[str, Any] = name.replace("""img_encoder.patch_embed.proj""", """vision_model.embeddings.patch_embeddings.projection""" ) if "img_encoder.patch_embed.norm" in name: lowerCamelCase : Optional[int] = name.replace("""img_encoder.patch_embed.norm""", """vision_model.embeddings.layernorm""" ) if "img_encoder.layers" in name: lowerCamelCase : List[str] = name.replace("""img_encoder.layers""", """vision_model.encoder.stages""" ) if "blocks" in name and "res" not in name: lowerCamelCase : List[Any] = name.replace("""blocks""", """layers""" ) if "attn" in name and "pre_assign" not in name: lowerCamelCase : Optional[int] = name.replace("""attn""", """self_attn""" ) if "proj" in name and "self_attn" in name and "text" not in name: lowerCamelCase : Optional[int] = name.replace("""proj""", """out_proj""" ) if "pre_assign_attn.attn.proj" in name: lowerCamelCase : Any = name.replace("""pre_assign_attn.attn.proj""", """pre_assign_attn.attn.out_proj""" ) if "norm1" in name: lowerCamelCase : Optional[Any] = name.replace("""norm1""", """layer_norm1""" ) if "norm2" in name and "pre_assign" not in name: lowerCamelCase : Union[str, Any] = name.replace("""norm2""", """layer_norm2""" ) if "img_encoder.norm" in name: lowerCamelCase : Optional[int] = name.replace("""img_encoder.norm""", """vision_model.layernorm""" ) # text encoder if "text_encoder.token_embedding" in name: lowerCamelCase : int = name.replace("""text_encoder.token_embedding""", """text_model.embeddings.token_embedding""" ) if "text_encoder.positional_embedding" in name: lowerCamelCase : Optional[Any] = name.replace("""text_encoder.positional_embedding""", """text_model.embeddings.position_embedding.weight""" ) if "text_encoder.transformer.resblocks." in name: lowerCamelCase : Optional[Any] = name.replace("""text_encoder.transformer.resblocks.""", """text_model.encoder.layers.""" ) if "ln_1" in name: lowerCamelCase : Optional[Any] = name.replace("""ln_1""", """layer_norm1""" ) if "ln_2" in name: lowerCamelCase : str = name.replace("""ln_2""", """layer_norm2""" ) if "c_fc" in name: lowerCamelCase : Any = name.replace("""c_fc""", """fc1""" ) if "c_proj" in name: lowerCamelCase : Tuple = name.replace("""c_proj""", """fc2""" ) if "text_encoder" in name: lowerCamelCase : List[str] = name.replace("""text_encoder""", """text_model""" ) if "ln_final" in name: lowerCamelCase : Tuple = name.replace("""ln_final""", """final_layer_norm""" ) # projection layers if "img_projector.linear_hidden." in name: lowerCamelCase : Optional[int] = name.replace("""img_projector.linear_hidden.""", """visual_projection.""" ) if "img_projector.linear_out." 
in name: lowerCamelCase : Tuple = name.replace("""img_projector.linear_out.""", """visual_projection.3.""" ) if "text_projector.linear_hidden" in name: lowerCamelCase : Tuple = name.replace("""text_projector.linear_hidden""", """text_projection""" ) if "text_projector.linear_out" in name: lowerCamelCase : Tuple = name.replace("""text_projector.linear_out""", """text_projection.3""" ) return name def _a ( lowerCamelCase, lowerCamelCase ): for key in orig_state_dict.copy().keys(): lowerCamelCase : Tuple = orig_state_dict.pop(lowerCamelCase ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors lowerCamelCase : Any = key.split(""".""" ) lowerCamelCase , lowerCamelCase : Optional[Any] = int(key_split[2] ), int(key_split[4] ) lowerCamelCase : List[Any] = config.vision_config.hidden_size if "weight" in key: lowerCamelCase : int = val[:dim, :] lowerCamelCase : List[str] = val[dim : dim * 2, :] lowerCamelCase : Dict = val[-dim:, :] else: lowerCamelCase : List[Any] = val[:dim] lowerCamelCase : List[Any] = val[dim : dim * 2] lowerCamelCase : Tuple = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors lowerCamelCase : str = key.split(""".""" ) lowerCamelCase : Optional[int] = int(key_split[3] ) lowerCamelCase : List[str] = config.text_config.hidden_size if "weight" in key: lowerCamelCase : Optional[int] = val[:dim, :] lowerCamelCase : Any = val[ dim : dim * 2, : ] lowerCamelCase : Optional[Any] = val[-dim:, :] else: lowerCamelCase : Union[str, Any] = val[:dim] lowerCamelCase : Optional[int] = val[dim : dim * 2] lowerCamelCase : Union[str, Any] = val[-dim:] else: lowerCamelCase : List[Any] = rename_key(lowerCamelCase ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): lowerCamelCase : Any = val.squeeze_() else: lowerCamelCase : Union[str, Any] = val return orig_state_dict def _a ( ): lowerCamelCase : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase : List[str] = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase ).raw ) return im @torch.no_grad() def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase="groupvit-gcc-yfcc", lowerCamelCase=False ): lowerCamelCase : int = GroupViTConfig() lowerCamelCase : Dict = GroupViTModel(lowerCamelCase ).eval() lowerCamelCase : Optional[int] = torch.load(lowerCamelCase, map_location="""cpu""" )["""model"""] lowerCamelCase : Tuple = convert_state_dict(lowerCamelCase, lowerCamelCase ) lowerCamelCase , lowerCamelCase : Tuple = model.load_state_dict(lowerCamelCase, strict=lowerCamelCase ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowerCamelCase ) == 0) # verify result lowerCamelCase : int = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" ) lowerCamelCase : int = prepare_img() lowerCamelCase : int = processor(text=["""a photo of a cat""", """a photo of a dog"""], images=lowerCamelCase, padding=lowerCamelCase, return_tensors="""pt""" ) with torch.no_grad(): lowerCamelCase : int = model(**lowerCamelCase ) if model_name == "groupvit-gcc-yfcc": lowerCamelCase : Any = torch.tensor([[1_3.3_5_2_3, 
6.3_6_2_9]] ) elif model_name == "groupvit-gcc-redcaps": lowerCamelCase : Any = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] ) else: raise ValueError(F'''Model name {model_name} not supported.''' ) assert torch.allclose(outputs.logits_per_image, lowerCamelCase, atol=1e-3 ) processor.save_pretrained(lowerCamelCase ) model.save_pretrained(lowerCamelCase ) print("""Successfully saved processor and model to""", lowerCamelCase ) if push_to_hub: print("""Pushing to the hub...""" ) processor.push_to_hub(lowerCamelCase, organization="""nielsr""" ) model.push_to_hub(lowerCamelCase, organization="""nielsr""" ) if __name__ == "__main__": _lowerCamelCase =argparse.ArgumentParser() parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model.""" ) parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""") parser.add_argument( """--model_name""", default="""groupvit-gccy-fcc""", type=str, help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""", ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""", ) _lowerCamelCase =parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
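# Hedged sketch of zero-shot classification with a converted GroupViT checkpoint;
# the Hub id "nvidia/groupvit-gcc-yfcc" is an assumption. Requires torch,
# transformers, PIL, and network access.
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTModel

model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")  # assumed checkpoint
processor = CLIPProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")  # assumed checkpoint
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
probs = outputs.logits_per_image.softmax(dim=-1)  # image-text similarity as probabilities
print(probs)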
import math


def is_perfect_square(num: int) -> bool:
    """Float-based check; exact for small ints, but subject to sqrt rounding for large ones."""
    return math.sqrt(num) * math.sqrt(num) == num


def is_perfect_square_binary_search(n: int) -> bool:
    """Exact integer binary search for a square root, avoiding float rounding."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
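# Hedged standalone usage of the helpers above (names follow the cleaned-up
# definitions in this file); both checks should agree on these inputs.
assert is_perfect_square(16) and is_perfect_square_binary_search(16)
assert not is_perfect_square(27) and not is_perfect_square_binary_search(27)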
from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax.numpy as jnp from jax import random from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils_flax import FlaxSchedulerMixin @flax.struct.dataclass class A__ : # setable values _UpperCAmelCase : Optional[int] = None _UpperCAmelCase : Optional[jnp.ndarray] = None _UpperCAmelCase : Optional[jnp.ndarray] = None # sigma(t_i) @classmethod def UpperCamelCase__ ( cls ): return cls() @dataclass class A__ ( __SCREAMING_SNAKE_CASE): _UpperCAmelCase : jnp.ndarray _UpperCAmelCase : jnp.ndarray _UpperCAmelCase : KarrasVeSchedulerState class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): @property def UpperCamelCase__ ( self ): return True @register_to_config def __init__( self , __magic_name__ = 0.02 , __magic_name__ = 1_0_0 , __magic_name__ = 1.007 , __magic_name__ = 8_0 , __magic_name__ = 0.05 , __magic_name__ = 5_0 , ): pass def UpperCamelCase__ ( self ): return KarrasVeSchedulerState.create() def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ = () ): lowerCamelCase : Dict = jnp.arange(0 , __magic_name__ )[::-1].copy() lowerCamelCase : int = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in timesteps ] return state.replace( num_inference_steps=__magic_name__ , schedule=jnp.array(__magic_name__ , dtype=jnp.floataa ) , timesteps=__magic_name__ , ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ): if self.config.s_min <= sigma <= self.config.s_max: lowerCamelCase : Dict = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 ) else: lowerCamelCase : Dict = 0 # sample eps ~ N(0, S_noise^2 * I) lowerCamelCase : List[Any] = random.split(__magic_name__ , num=1 ) lowerCamelCase : Union[str, Any] = self.config.s_noise * random.normal(key=__magic_name__ , shape=sample.shape ) lowerCamelCase : List[Any] = sigma + gamma * sigma lowerCamelCase : str = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = True , ): lowerCamelCase : Optional[Any] = sample_hat + sigma_hat * model_output lowerCamelCase : Dict = (sample_hat - pred_original_sample) / sigma_hat lowerCamelCase : List[Any] = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=__magic_name__ , derivative=__magic_name__ , state=__magic_name__ ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = True , ): lowerCamelCase : str = sample_prev + sigma_prev * model_output lowerCamelCase : str = (sample_prev - pred_original_sample) / sigma_prev lowerCamelCase : Optional[Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=__magic_name__ , derivative=__magic_name__ , state=__magic_name__ ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): raise NotImplementedError()
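# Hedged standalone sketch of the sigma schedule formula used in set_timesteps
# above; the defaults mirror the @register_to_config signature (sigma_min=0.02,
# sigma_max=100), and the scheduler evaluates it over the reversed timestep array.
import jax.numpy as jnp

sigma_min, sigma_max, num_inference_steps = 0.02, 100.0, 50
timesteps = jnp.arange(0, num_inference_steps)[::-1]
schedule = sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (timesteps / (num_inference_steps - 1))
print(schedule[0], schedule[-1])  # first entry sigma_min**2, last entry sigma_max**2, as in the code above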
from torch import nn


class ClassificationHead(nn.Module):
    """Single-layer classification head mapping an embedding to class logits."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
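# Hedged usage sketch of the head above: score a batch of 768-d embeddings
# into 5 classes (sizes are illustrative).
import torch

head = ClassificationHead(class_size=5, embed_size=768)
logits = head(torch.randn(2, 768))  # -> shape (2, 5)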
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col: turn each k_size*k_size window into a row
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape (k*k,)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask sizes
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
"""simple docstring""" import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate UpperCAmelCase_ : Any = trt.Logger(trt.Logger.WARNING) UpperCAmelCase_ : List[Any] = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) UpperCAmelCase_ : Union[str, Any] = logging.getLogger(__name__) UpperCAmelCase_ : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--onnx_model_path''', default=None, type=str, required=True, help='''Path to ONNX model: ''', ) parser.add_argument( '''--output_dir''', default=None, type=str, required=True, help='''The output directory where the model checkpoints and predictions will be written.''', ) # Other parameters parser.add_argument( '''--tokenizer_name''', default='''''', type=str, required=True, help='''Pretrained tokenizer name or path if not the same as model_name''', ) parser.add_argument( '''--version_2_with_negative''', action='''store_true''', help='''If true, the SQuAD examples contain some that do not have an answer.''', ) parser.add_argument( '''--null_score_diff_threshold''', type=float, default=0.0, help='''If null_score - best_non_null is greater than the threshold predict null.''', ) parser.add_argument( '''--max_seq_length''', default=3_8_4, type=int, help=( '''The maximum total input sequence length after WordPiece tokenization. Sequences ''' '''longer than this will be truncated, and sequences shorter than this will be padded.''' ), ) parser.add_argument( '''--doc_stride''', default=1_2_8, type=int, help='''When splitting up a long document into chunks, how much stride to take between chunks.''', ) parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''') parser.add_argument( '''--n_best_size''', default=2_0, type=int, help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''', ) parser.add_argument( '''--max_answer_length''', default=3_0, type=int, help=( '''The maximum length of an answer that can be generated. 
This is needed because the start ''' '''and end predictions are not conditioned on one another.''' ), ) parser.add_argument('''--seed''', type=int, default=4_2, help='''random seed for initialization''') parser.add_argument( '''--dataset_name''', type=str, default=None, required=True, help='''The name of the dataset to use (via the datasets library).''', ) parser.add_argument( '''--dataset_config_name''', type=str, default=None, help='''The configuration name of the dataset to use (via the datasets library).''', ) parser.add_argument( '''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.''' ) parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''') parser.add_argument( '''--fp16''', action='''store_true''', help='''Whether to use 16-bit (mixed) precision instead of 32-bit''', ) parser.add_argument( '''--int8''', action='''store_true''', help='''Whether to use INT8''', ) UpperCAmelCase_ : int = parser.parse_args() if args.tokenizer_name: UpperCAmelCase_ : int = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( '''You are instantiating a new tokenizer from scratch. This is not supported by this script.''' '''You can do it from another script, save it, and load it from here, using --tokenizer_name.''' ) logger.info('''Training/evaluation parameters %s''', args) UpperCAmelCase_ : Optional[Any] = args.per_device_eval_batch_size UpperCAmelCase_ : Optional[int] = (args.eval_batch_size, args.max_seq_length) # TRT Engine properties UpperCAmelCase_ : Optional[int] = True UpperCAmelCase_ : List[Any] = '''temp_engine/bert-fp32.engine''' if args.fpaa: UpperCAmelCase_ : Dict = '''temp_engine/bert-fp16.engine''' if args.inta: UpperCAmelCase_ : Any = '''temp_engine/bert-int8.engine''' # import ONNX file if not os.path.exists('''temp_engine'''): os.makedirs('''temp_engine''') UpperCAmelCase_ : Tuple = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, '''rb''') as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network UpperCAmelCase_ : List[Any] = [network.get_input(i) for i in range(network.num_inputs)] UpperCAmelCase_ : str = [_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: UpperCAmelCase_ : str = 1 << 5_0 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fpaa: config.set_flag(trt.BuilderFlag.FPaa) if args.inta: config.set_flag(trt.BuilderFlag.INTa) UpperCAmelCase_ : Any = builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) UpperCAmelCase_ : Tuple = builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, '''wb''') as f: f.write(engine.serialize()) def _lowerCAmelCase(a : Optional[int] , a : str , a : int , a : List[Any] , a : List[Any] , a : Optional[Any] , a : Dict , a : Optional[int] ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE =np.asarray(inputs['''input_ids'''] , dtype=np.intaa ) _SCREAMING_SNAKE_CASE 
=np.asarray(inputs['''attention_mask'''] , dtype=np.intaa ) _SCREAMING_SNAKE_CASE =np.asarray(inputs['''token_type_ids'''] , dtype=np.intaa ) # Copy inputs cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , a ) cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , a ) cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , a ) # start time _SCREAMING_SNAKE_CASE =time.time() # Run inference context.execute_async( bindings=[int(a ) for d_inp in d_inputs] + [int(a ), int(a )] , stream_handle=stream.handle ) # Transfer predictions back from GPU cuda.memcpy_dtoh_async(a , a , a ) cuda.memcpy_dtoh_async(a , a , a ) # Synchronize the stream and take time stream.synchronize() # end time _SCREAMING_SNAKE_CASE =time.time() _SCREAMING_SNAKE_CASE =end_time - start_time _SCREAMING_SNAKE_CASE =(h_outputa, h_outputa) # print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. UpperCAmelCase_ : Optional[int] = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if args.dataset_name is not None: # Downloading and loading a dataset from the hub. UpperCAmelCase_ : Tuple = load_dataset(args.dataset_name, args.dataset_config_name) else: raise ValueError('''Evaluation requires a dataset name''') # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. UpperCAmelCase_ : List[str] = raw_datasets['''validation'''].column_names UpperCAmelCase_ : Optional[int] = '''question''' if '''question''' in column_names else column_names[0] UpperCAmelCase_ : Any = '''context''' if '''context''' in column_names else column_names[1] UpperCAmelCase_ : Any = '''answers''' if '''answers''' in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). UpperCAmelCase_ : Optional[Any] = tokenizer.padding_side == '''right''' if args.max_seq_length > tokenizer.model_max_length: logger.warning( f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the" f"model ({tokenizer.model_max_length}). 
Using max_seq_length={tokenizer.model_max_length}." ) UpperCAmelCase_ : Dict = min(args.max_seq_length, tokenizer.model_max_length) def _lowerCAmelCase(a : Dict ) -> Optional[int]: # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). So we remove that # left whitespace _SCREAMING_SNAKE_CASE =[q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. _SCREAMING_SNAKE_CASE =tokenizer( examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='''only_second''' if pad_on_right else '''only_first''' , max_length=a , stride=args.doc_stride , return_overflowing_tokens=a , return_offsets_mapping=a , padding='''max_length''' , ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. _SCREAMING_SNAKE_CASE =tokenized_examples.pop('''overflow_to_sample_mapping''' ) # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. _SCREAMING_SNAKE_CASE =[] for i in range(len(tokenized_examples['''input_ids'''] ) ): # Grab the sequence corresponding to that example (to know what is the context and what is the question). _SCREAMING_SNAKE_CASE =tokenized_examples.sequence_ids(a ) _SCREAMING_SNAKE_CASE =1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. _SCREAMING_SNAKE_CASE =sample_mapping[i] tokenized_examples["example_id"].append(examples['''id'''][sample_index] ) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. _SCREAMING_SNAKE_CASE =[ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples['''offset_mapping'''][i] ) ] return tokenized_examples UpperCAmelCase_ : List[str] = raw_datasets['''validation'''] # Validation Feature Creation UpperCAmelCase_ : List[Any] = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc='''Running tokenizer on validation dataset''', ) UpperCAmelCase_ : Dict = default_data_collator UpperCAmelCase_ : Any = eval_dataset.remove_columns(['''example_id''', '''offset_mapping''']) UpperCAmelCase_ : str = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def _lowerCAmelCase(a : Optional[int] , a : Optional[int] , a : Tuple , a : Optional[int]="eval" ) -> Any: # Post-processing: we match the start logits and end logits to answers in the original context. 
_SCREAMING_SNAKE_CASE =postprocess_qa_predictions( examples=a , features=a , predictions=a , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=a , ) # Format the result to the format the metric expects. if args.version_2_with_negative: _SCREAMING_SNAKE_CASE =[ {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items() ] else: _SCREAMING_SNAKE_CASE =[{"""id""": k, """prediction_text""": v} for k, v in predictions.items()] _SCREAMING_SNAKE_CASE =[{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=a , label_ids=a ) UpperCAmelCase_ : List[Any] = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''') # Evaluation! logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path) with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( f.read() ) as engine, engine.create_execution_context() as context: # setup for TRT inferrence for i in range(len(input_names)): context.set_binding_shape(i, INPUT_SHAPE) assert context.all_binding_shapes_specified def _lowerCAmelCase(a : int ) -> Optional[int]: return trt.volume(engine.get_binding_shape(a ) ) * engine.get_binding_dtype(a ).itemsize # Allocate device memory for inputs and outputs. UpperCAmelCase_ : Tuple = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] # Allocate output buffer UpperCAmelCase_ : Any = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa) UpperCAmelCase_ : List[Any] = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa) UpperCAmelCase_ : str = cuda.mem_alloc(h_outputa.nbytes) UpperCAmelCase_ : int = cuda.mem_alloc(h_outputa.nbytes) # Create a stream in which to copy inputs/outputs and run inference. 
UpperCAmelCase_ : int = cuda.Stream() # Evaluation logger.info('''***** Running Evaluation *****''') logger.info(f" Num examples = {len(eval_dataset)}") logger.info(f" Batch size = {args.per_device_eval_batch_size}") UpperCAmelCase_ : Any = 0.0 UpperCAmelCase_ : int = 0 UpperCAmelCase_ : Dict = timeit.default_timer() UpperCAmelCase_ : List[str] = None for step, batch in enumerate(eval_dataloader): UpperCAmelCase_ , UpperCAmelCase_ : List[str] = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream) total_time += infer_time niter += 1 UpperCAmelCase_ , UpperCAmelCase_ : str = outputs UpperCAmelCase_ : Optional[int] = torch.tensor(start_logits) UpperCAmelCase_ : Tuple = torch.tensor(end_logits) # necessary to pad predictions and labels for being gathered UpperCAmelCase_ : Dict = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_0_0) UpperCAmelCase_ : List[Any] = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_0_0) UpperCAmelCase_ : Tuple = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) UpperCAmelCase_ : Any = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_0_0) if all_preds is not None: UpperCAmelCase_ : int = nested_truncate(all_preds, len(eval_dataset)) UpperCAmelCase_ : List[Any] = timeit.default_timer() - start_time logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset)) # Inference time from TRT logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 1_0_0_0 / niter)) logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 1_0_0_0)) logger.info('''Total Number of Inference = %d''', niter) UpperCAmelCase_ : Any = post_processing_function(eval_examples, eval_dataset, all_preds) UpperCAmelCase_ : Optional[Any] = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(f"Evaluation metrics: {eval_metric}")
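# Hedged example invocation of the TensorRT QA evaluation script above; the
# script filename and the ONNX model path are illustrative placeholders. The
# flags correspond to the argparse options defined at the top of the script.
# python evaluate_qa_with_tensorrt.py \
#     --onnx_model_path ./model.onnx \
#     --output_dir ./trt_eval \
#     --tokenizer_name bert-base-uncased \
#     --dataset_name squad \
#     --fp16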
import pytest


DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = """
import json
import os

import datasets


REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}


class __DummyDataset1__(datasets.GeneratorBasedBuilder):

    def _info(self):
        features = datasets.Features(
            {
                "tokens": datasets.Sequence(datasets.Value("string")),
                "ner_tags": datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=[
                            "O",
                            "B-PER",
                            "I-PER",
                            "B-ORG",
                            "I-ORG",
                            "B-LOC",
                            "I-LOC",
                        ]
                    )
                ),
                "langs": datasets.Sequence(datasets.Value("string")),
                "spans": datasets.Sequence(datasets.Value("string")),
            }
        )
        return datasets.DatasetInfo(features=features)

    def _split_generators(self, dl_manager):
        dl_path = dl_manager.download(URLS)
        return [
            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
        ]

    def _generate_examples(self, filepath):
        with open(filepath, "r", encoding="utf-8") as f:
            for i, line in enumerate(f):
                yield i, json.loads(line)
"""


@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
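# Hedged sketch of a test consuming the fixtures above; pytest injects the
# directory containing the generated dummy loading script.
import os


def test_dummy_script_is_written(dataset_loading_script_dir, dataset_loading_script_name):
    assert os.path.isfile(os.path.join(dataset_loading_script_dir, dataset_loading_script_name + ".py"))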
"""simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = tuple[float, float, float] _SCREAMING_SNAKE_CASE : List[Any] = tuple[float, float, float] def lowerCamelCase__ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any ) -> int: lowerCamelCase_ = end_pointa[0] - end_pointa[0] lowerCamelCase_ = end_pointa[1] - end_pointa[1] lowerCamelCase_ = end_pointa[2] - end_pointa[2] return (x, y, z) def lowerCamelCase__ ( _lowerCamelCase : Tuple , _lowerCamelCase : List[str] ) -> str: lowerCamelCase_ = ab[1] * ac[2] - ab[2] * ac[1] # *i lowerCamelCase_ = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j lowerCamelCase_ = ab[0] * ac[1] - ab[1] * ac[0] # *k return (x, y, z) def lowerCamelCase__ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[str] ) -> Union[str, Any]: return tuple(round(_lowerCamelCase , _lowerCamelCase ) for x in vector ) == (0, 0, 0) def lowerCamelCase__ ( _lowerCamelCase : List[str] , _lowerCamelCase : Tuple , _lowerCamelCase : Dict , _lowerCamelCase : Any = 10 ) -> Union[str, Any]: lowerCamelCase_ = create_vector(_lowerCamelCase , _lowerCamelCase ) lowerCamelCase_ = create_vector(_lowerCamelCase , _lowerCamelCase ) return is_zero_vector(get_ad_vectors_cross(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image


if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    """Convert a torch image batch in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch in [0, 1] to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
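# Hedged usage sketch for the helpers above: convert a batch of [-1, 1] float
# tensors (as produced by diffusion decoders) to PIL images. Requires torch.
import torch

batch = torch.rand(2, 3, 64, 64) * 2 - 1  # fake decoder output in [-1, 1]
pil_images = pt_to_pil(batch)
print(pil_images[0].size)  # (64, 64)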
"""simple docstring""" from .configuration_bert_masked import MaskedBertConfig from .modeling_bert_masked import ( MaskedBertForMultipleChoice, MaskedBertForQuestionAnswering, MaskedBertForSequenceClassification, MaskedBertForTokenClassification, MaskedBertModel, ) from .modules import *
from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class A__ ( nn.Module): def __init__( self , __magic_name__ = 1_6 , __magic_name__ = 8_8 , __magic_name__ = None , __magic_name__ = 1 , __magic_name__ = 0.0 , __magic_name__ = 3_2 , __magic_name__ = None , __magic_name__ = False , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "geglu" , __magic_name__ = None , ): super().__init__() lowerCamelCase : Any = nn.ModuleList( [ TransformeraDModel( num_attention_heads=__magic_name__ , attention_head_dim=__magic_name__ , in_channels=__magic_name__ , num_layers=__magic_name__ , dropout=__magic_name__ , norm_num_groups=__magic_name__ , cross_attention_dim=__magic_name__ , attention_bias=__magic_name__ , sample_size=__magic_name__ , num_vector_embeds=__magic_name__ , activation_fn=__magic_name__ , num_embeds_ada_norm=__magic_name__ , ) for _ in range(2 ) ] ) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference lowerCamelCase : Any = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` lowerCamelCase : List[Any] = [7_7, 2_5_7] # Which transformer to use to encode which condition. # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` lowerCamelCase : Optional[int] = [1, 0] def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__ = True , ): lowerCamelCase : List[Any] = hidden_states lowerCamelCase : Dict = [] lowerCamelCase : List[Any] = 0 # attention_mask is not used yet for i in range(2 ): # for each of the two transformers, pass the corresponding condition tokens lowerCamelCase : Dict = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] lowerCamelCase : Optional[int] = self.transformer_index_for_condition[i] lowerCamelCase : List[Any] = self.transformers[transformer_index]( __magic_name__ , encoder_hidden_states=__magic_name__ , timestep=__magic_name__ , cross_attention_kwargs=__magic_name__ , return_dict=__magic_name__ , )[0] encoded_states.append(encoded_state - input_states ) tokens_start += self.condition_lengths[i] lowerCamelCase : Any = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) lowerCamelCase : Dict = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=__magic_name__ )
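# Hedged standalone illustration of the blending rule in the forward pass above:
# each branch's residual (output minus input) is mixed by mix_ratio, then the
# input is added back. Shapes are illustrative.
import torch

input_states = torch.randn(1, 4, 8)
enc0, enc1 = torch.randn_like(input_states), torch.randn_like(input_states)  # fake branch outputs
mix_ratio = 0.5
res0, res1 = enc0 - input_states, enc1 - input_states
output_states = (res0 * mix_ratio + res1 * (1 - mix_ratio)) + input_states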
"""simple docstring""" import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class _UpperCAmelCase ( unittest.TestCase): def _snake_case ( self : Dict ): snake_case_ : Union[str, Any] = [ """safety_checker/pytorch_model.bin""", """safety_checker/model.safetensors""", """vae/diffusion_pytorch_model.bin""", """vae/diffusion_pytorch_model.safetensors""", """text_encoder/pytorch_model.bin""", """text_encoder/model.safetensors""", """unet/diffusion_pytorch_model.bin""", """unet/diffusion_pytorch_model.safetensors""", ] self.assertTrue(is_safetensors_compatible(lowercase_ ) ) def _snake_case ( self : Union[str, Any] ): snake_case_ : Tuple = [ """unet/diffusion_pytorch_model.bin""", """unet/diffusion_pytorch_model.safetensors""", ] self.assertTrue(is_safetensors_compatible(lowercase_ ) ) def _snake_case ( self : List[Any] ): snake_case_ : Any = [ """safety_checker/pytorch_model.bin""", """safety_checker/model.safetensors""", """vae/diffusion_pytorch_model.bin""", """vae/diffusion_pytorch_model.safetensors""", """text_encoder/pytorch_model.bin""", """text_encoder/model.safetensors""", """unet/diffusion_pytorch_model.bin""", # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(lowercase_ ) ) def _snake_case ( self : Dict ): snake_case_ : str = [ """text_encoder/pytorch_model.bin""", """text_encoder/model.safetensors""", ] self.assertTrue(is_safetensors_compatible(lowercase_ ) ) def _snake_case ( self : Union[str, Any] ): snake_case_ : Optional[Any] = [ """safety_checker/pytorch_model.bin""", """safety_checker/model.safetensors""", """vae/diffusion_pytorch_model.bin""", """vae/diffusion_pytorch_model.safetensors""", """text_encoder/pytorch_model.bin""", # Removed: 'text_encoder/model.safetensors', """unet/diffusion_pytorch_model.bin""", """unet/diffusion_pytorch_model.safetensors""", ] self.assertFalse(is_safetensors_compatible(lowercase_ ) ) def _snake_case ( self : str ): snake_case_ : Tuple = [ """safety_checker/pytorch_model.fp16.bin""", """safety_checker/model.fp16.safetensors""", """vae/diffusion_pytorch_model.fp16.bin""", """vae/diffusion_pytorch_model.fp16.safetensors""", """text_encoder/pytorch_model.fp16.bin""", """text_encoder/model.fp16.safetensors""", """unet/diffusion_pytorch_model.fp16.bin""", """unet/diffusion_pytorch_model.fp16.safetensors""", ] snake_case_ : Optional[int] = """fp16""" self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_ ) ) def _snake_case ( self : Optional[Any] ): snake_case_ : Union[str, Any] = [ """unet/diffusion_pytorch_model.fp16.bin""", """unet/diffusion_pytorch_model.fp16.safetensors""", ] snake_case_ : Union[str, Any] = """fp16""" self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_ ) ) def _snake_case ( self : Union[str, Any] ): # pass variant but use the non-variant filenames snake_case_ : int = [ """unet/diffusion_pytorch_model.bin""", """unet/diffusion_pytorch_model.safetensors""", ] snake_case_ : str = """fp16""" self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_ ) ) def _snake_case ( self : str ): snake_case_ : int = [ """safety_checker/pytorch_model.fp16.bin""", """safety_checker/model.fp16.safetensors""", """vae/diffusion_pytorch_model.fp16.bin""", """vae/diffusion_pytorch_model.fp16.safetensors""", """text_encoder/pytorch_model.fp16.bin""", """text_encoder/model.fp16.safetensors""", """unet/diffusion_pytorch_model.fp16.bin""", # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] snake_case_ : 
str = """fp16""" self.assertFalse(is_safetensors_compatible(lowercase_ , variant=lowercase_ ) ) def _snake_case ( self : List[Any] ): snake_case_ : List[str] = [ """text_encoder/pytorch_model.fp16.bin""", """text_encoder/model.fp16.safetensors""", ] snake_case_ : List[str] = """fp16""" self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_ ) ) def _snake_case ( self : Optional[Any] ): # pass variant but use the non-variant filenames snake_case_ : str = [ """text_encoder/pytorch_model.bin""", """text_encoder/model.safetensors""", ] snake_case_ : List[str] = """fp16""" self.assertTrue(is_safetensors_compatible(lowercase_ , variant=lowercase_ ) ) def _snake_case ( self : List[str] ): snake_case_ : List[Any] = [ """safety_checker/pytorch_model.fp16.bin""", """safety_checker/model.fp16.safetensors""", """vae/diffusion_pytorch_model.fp16.bin""", """vae/diffusion_pytorch_model.fp16.safetensors""", """text_encoder/pytorch_model.fp16.bin""", # 'text_encoder/model.fp16.safetensors', """unet/diffusion_pytorch_model.fp16.bin""", """unet/diffusion_pytorch_model.fp16.safetensors""", ] snake_case_ : Tuple = """fp16""" self.assertFalse(is_safetensors_compatible(lowercase_ , variant=lowercase_ ) )
123
import unittest from transformers import BertGenerationTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin _lowerCamelCase ="""▁""" _lowerCamelCase =get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase): _UpperCAmelCase : str = BertGenerationTokenizer _UpperCAmelCase : Tuple = False _UpperCAmelCase : List[Any] = True def UpperCamelCase__ ( self ): super().setUp() lowerCamelCase : int = BertGenerationTokenizer(__magic_name__ , keep_accents=__magic_name__ ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = """<s>""" lowerCamelCase : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<unk>""" ) self.assertEqual(vocab_keys[1] , """<s>""" ) self.assertEqual(vocab_keys[-1] , """<pad>""" ) self.assertEqual(len(__magic_name__ ) , 1_0_0_2 ) def UpperCamelCase__ ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = BertGenerationTokenizer(__magic_name__ , keep_accents=__magic_name__ ) lowerCamelCase : Optional[Any] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(__magic_name__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__magic_name__ ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , ) lowerCamelCase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __magic_name__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) lowerCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__magic_name__ ) self.assertListEqual( __magic_name__ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , ) lowerCamelCase : int = tokenizer.convert_ids_to_tokens(__magic_name__ ) self.assertListEqual( __magic_name__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) @cached_property def UpperCamelCase__ ( self ): return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" ) @slow def UpperCamelCase__ ( self ): lowerCamelCase : List[Any] = """Hello World!""" lowerCamelCase : Any = [1_8_5_3_6, 2_2_6_0, 1_0_1] self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) ) @slow def UpperCamelCase__ ( self ): lowerCamelCase : str = ( """This is a very long text with a lot of weird characters, such 
as: . , ~ ? ( ) \" [ ] ! : - . Also we will""" """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth""" ) lowerCamelCase : str = [ 8_7_1, 4_1_9, 3_5_8, 9_4_6, 9_9_1, 2_5_2_1, 4_5_2, 3_5_8, 1_3_5_7, 3_8_7, 7_7_5_1, 3_5_3_6, 1_1_2, 9_8_5, 4_5_6, 1_2_6, 8_6_5, 9_3_8, 5_4_0_0, 5_7_3_4, 4_5_8, 1_3_6_8, 4_6_7, 7_8_6, 2_4_6_2, 5_2_4_6, 1_1_5_9, 6_3_3, 8_6_5, 4_5_1_9, 4_5_7, 5_8_2, 8_5_2, 2_5_5_7, 4_2_7, 9_1_6, 5_0_8, 4_0_5, 3_4_3_2_4, 4_9_7, 3_9_1, 4_0_8, 1_1_3_4_2, 1_2_4_4, 3_8_5, 1_0_0, 9_3_8, 9_8_5, 4_5_6, 5_7_4, 3_6_2, 1_2_5_9_7, 3_2_0_0, 3_1_2_9, 1_1_7_2, ] self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) ) @require_torch @slow def UpperCamelCase__ ( self ): import torch from transformers import BertGenerationConfig, BertGenerationEncoder # Build sequence lowerCamelCase : Union[str, Any] = list(self.big_tokenizer.get_vocab().keys() )[:1_0] lowerCamelCase : Dict = """ """.join(__magic_name__ ) lowerCamelCase : Any = self.big_tokenizer.encode_plus(__magic_name__ , return_tensors="""pt""" , return_token_type_ids=__magic_name__ ) lowerCamelCase : List[str] = self.big_tokenizer.batch_encode_plus( [sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=__magic_name__ ) lowerCamelCase : Tuple = BertGenerationConfig() lowerCamelCase : Optional[int] = BertGenerationEncoder(__magic_name__ ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**__magic_name__ ) model(**__magic_name__ ) @slow def UpperCamelCase__ ( self ): # fmt: off lowerCamelCase : Any = {"""input_ids""": [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__magic_name__ , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
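These tests revolve around the tokenize -> ids -> tokens round trip on a SentencePiece fixture. Below is a compact sketch of that round trip using the same public checkpoint the slow tests load; the exact token strings depend on the vocabulary, so no specific values are asserted.

# Round-trip sketch with the checkpoint the slow tests load. Outputs depend on
# the SentencePiece vocabulary, so only the round-trip property is checked.
from transformers import BertGenerationTokenizer

tok = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
tokens = tok.tokenize("This is a test")
ids = tok.convert_tokens_to_ids(tokens)
assert tok.convert_ids_to_tokens(ids) == tokens  # lossless for in-vocabulary tokens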
681
0
'''simple docstring''' import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase : Tuple = logging.get_logger(__name__) lowerCAmelCase : Union[str, Any] = { """b0""": efficientnet.EfficientNetBa, """b1""": efficientnet.EfficientNetBa, """b2""": efficientnet.EfficientNetBa, """b3""": efficientnet.EfficientNetBa, """b4""": efficientnet.EfficientNetBa, """b5""": efficientnet.EfficientNetBa, """b6""": efficientnet.EfficientNetBa, """b7""": efficientnet.EfficientNetBa, } lowerCAmelCase : str = { """b0""": { """hidden_dim""": 1_2_8_0, """width_coef""": 1.0, """depth_coef""": 1.0, """image_size""": 2_2_4, """dropout_rate""": 0.2, """dw_padding""": [], }, """b1""": { """hidden_dim""": 1_2_8_0, """width_coef""": 1.0, """depth_coef""": 1.1, """image_size""": 2_4_0, """dropout_rate""": 0.2, """dw_padding""": [1_6], }, """b2""": { """hidden_dim""": 1_4_0_8, """width_coef""": 1.1, """depth_coef""": 1.2, """image_size""": 2_6_0, """dropout_rate""": 0.3, """dw_padding""": [5, 8, 1_6], }, """b3""": { """hidden_dim""": 1_5_3_6, """width_coef""": 1.2, """depth_coef""": 1.4, """image_size""": 3_0_0, """dropout_rate""": 0.3, """dw_padding""": [5, 1_8], }, """b4""": { """hidden_dim""": 1_7_9_2, """width_coef""": 1.4, """depth_coef""": 1.8, """image_size""": 3_8_0, """dropout_rate""": 0.4, """dw_padding""": [6], }, """b5""": { """hidden_dim""": 2_0_4_8, """width_coef""": 1.6, """depth_coef""": 2.2, """image_size""": 4_5_6, """dropout_rate""": 0.4, """dw_padding""": [1_3, 2_7], }, """b6""": { """hidden_dim""": 2_3_0_4, """width_coef""": 1.8, """depth_coef""": 2.6, """image_size""": 5_2_8, """dropout_rate""": 0.5, """dw_padding""": [3_1], }, """b7""": { """hidden_dim""": 2_5_6_0, """width_coef""": 2.0, """depth_coef""": 3.1, """image_size""": 6_0_0, """dropout_rate""": 0.5, """dw_padding""": [1_8], }, } def _A ( A ) -> Any: lowercase : Optional[int] = EfficientNetConfig() lowercase : Optional[int] = CONFIG_MAP[model_name]["""hidden_dim"""] lowercase : List[str] = CONFIG_MAP[model_name]["""width_coef"""] lowercase : List[Any] = CONFIG_MAP[model_name]["""depth_coef"""] lowercase : Union[str, Any] = CONFIG_MAP[model_name]["""image_size"""] lowercase : Any = CONFIG_MAP[model_name]["""dropout_rate"""] lowercase : Any = CONFIG_MAP[model_name]["""dw_padding"""] lowercase : Dict = """huggingface/label-files""" lowercase : Dict = """imagenet-1k-id2label.json""" lowercase : Tuple = 1_0_0_0 lowercase : Tuple = json.load(open(hf_hub_download(A ,A ,repo_type="dataset" ) ,"r" ) ) lowercase : int = {int(A ): v for k, v in idalabel.items()} lowercase : List[Any] = idalabel lowercase : Optional[Any] = {v: k for k, v in idalabel.items()} return config def _A ( ) -> List[str]: lowercase : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowercase : int = Image.open(requests.get(A ,stream=A ).raw ) return im def _A ( A ) -> Any: lowercase : int = CONFIG_MAP[model_name]["""image_size"""] lowercase : Optional[Any] = EfficientNetImageProcessor( size={"height": size, "width": size} ,image_mean=[0.485, 0.456, 0.406] ,image_std=[0.4785_3944, 0.473_2864, 0.4743_4163] ,do_center_crop=A ,) return preprocessor def 
_A ( A ) -> int: lowercase : List[str] = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )] lowercase : int = sorted(set(A ) ) lowercase : Any = len(A ) lowercase : Tuple = {b: str(A ) for b, i in zip(A ,range(A ) )} lowercase : Dict = [] rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") ) rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") ) rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") ) rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") ) rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") ) for b in block_names: lowercase : int = block_name_mapping[b] rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") ) rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") ) rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") ) rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") ) rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") ) lowercase : List[str] = {} for item in rename_keys: if item[0] in original_param_names: lowercase : Optional[Any] = """efficientnet.""" + item[1] lowercase : Any 
= """classifier.weight""" lowercase : Dict = """classifier.bias""" return key_mapping def _A ( A ,A ,A ) -> Optional[int]: for key, value in tf_params.items(): if "normalization" in key: continue lowercase : Any = key_mapping[key] if "_conv" in key and "kernel" in key: lowercase : Dict = torch.from_numpy(A ).permute(3 ,2 ,0 ,1 ) elif "depthwise_kernel" in key: lowercase : Dict = torch.from_numpy(A ).permute(2 ,3 ,0 ,1 ) elif "kernel" in key: lowercase : str = torch.from_numpy(np.transpose(A ) ) else: lowercase : List[str] = torch.from_numpy(A ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(A ) @torch.no_grad() def _A ( A ,A ,A ,A ) -> Optional[Any]: lowercase : Tuple = model_classes[model_name]( include_top=A ,weights="imagenet" ,input_tensor=A ,input_shape=A ,pooling=A ,classes=1_0_0_0 ,classifier_activation="softmax" ,) lowercase : Optional[int] = original_model.trainable_variables lowercase : str = original_model.non_trainable_variables lowercase : str = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: lowercase : Optional[Any] = param.numpy() lowercase : List[Any] = list(tf_params.keys() ) # Load HuggingFace model lowercase : Union[str, Any] = get_efficientnet_config(A ) lowercase : Optional[int] = EfficientNetForImageClassification(A ).eval() lowercase : List[Any] = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("Converting parameters..." ) lowercase : str = rename_keys(A ) replace_params(A ,A ,A ) # Initialize preprocessor and preprocess input image lowercase : List[str] = convert_image_processor(A ) lowercase : List[Any] = preprocessor(images=prepare_img() ,return_tensors="pt" ) # HF model inference hf_model.eval() with torch.no_grad(): lowercase : List[Any] = hf_model(**A ) lowercase : List[Any] = outputs.logits.detach().numpy() # Original model inference lowercase : Optional[Any] = False lowercase : Union[str, Any] = CONFIG_MAP[model_name]["""image_size"""] lowercase : Any = prepare_img().resize((image_size, image_size) ,resample=PIL.Image.NEAREST ) lowercase : Tuple = image.img_to_array(A ) lowercase : Optional[Any] = np.expand_dims(A ,axis=0 ) lowercase : List[str] = original_model.predict(A ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(A ,A ,atol=1e-3 ), "The predicted logits are not the same." print("Model outputs match!" 
) if save_model: # Create folder to save model if not os.path.isdir(A ): os.mkdir(A ) # Save converted model and image processor hf_model.save_pretrained(A ) preprocessor.save_pretrained(A ) if push_to_hub: # Push model and image processor to hub print(F'''Pushing converted {model_name} to the hub...''' ) lowercase : List[Any] = F'''efficientnet-{model_name}''' preprocessor.push_to_hub(A ) hf_model.push_to_hub(A ) if __name__ == "__main__": lowerCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""b0""", type=str, help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""hf_model""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""") parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") lowerCAmelCase : int = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
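The heart of the conversion is the weight-layout translation: Keras stores convolution kernels as (H, W, in, out) and depthwise kernels as (H, W, in, multiplier), while PyTorch expects (out, in, H, W) and (in, multiplier, H, W) respectively. A standalone sketch of just that permutation rule, with made-up shapes:

# Sketch of the TF -> PyTorch kernel layout conversion used by the script.
# Shapes are illustrative; only the permutation rule matters.
import numpy as np
import torch

tf_conv = np.zeros((3, 3, 16, 32))  # TF conv kernel: (H, W, in, out)
pt_conv = torch.from_numpy(tf_conv).permute(3, 2, 0, 1)  # -> (out, in, H, W)

tf_dw = np.zeros((3, 3, 16, 1))  # TF depthwise kernel: (H, W, in, multiplier)
pt_dw = torch.from_numpy(tf_dw).permute(2, 3, 0, 1)  # -> (in, multiplier, H, W)

assert pt_conv.shape == (32, 16, 3, 3) and pt_dw.shape == (16, 1, 3, 3)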
372
from arguments import InitializationArguments

from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser

# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_inverse_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
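The same initialization can be run without the CLI wrapper. A sketch under the assumption that a gpt2-large config and the public codeparrot tokenizer are used, mirroring the defaults this script is written for:

# Sketch: the initialization above without HfArgumentParser. The checkpoint
# names are assumptions chosen for illustration.
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("codeparrot/codeparrot")
config = AutoConfig.from_pretrained(
    "gpt2-large",
    vocab_size=len(tokenizer),
    scale_attn_by_inverse_layer_idx=True,
    reorder_and_upcast_attn=True,
)
model = AutoModelForCausalLM.from_config(config)  # fresh weights, not pretrained
print(f"{model.num_parameters() / 1e6:.0f}M parameters")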
681
0
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) a_ = logging.get_logger(__name__) a_ = OrderedDict( [ ('audio-spectrogram-transformer', 'ASTFeatureExtractor'), ('beit', 'BeitFeatureExtractor'), ('chinese_clip', 'ChineseCLIPFeatureExtractor'), ('clap', 'ClapFeatureExtractor'), ('clip', 'CLIPFeatureExtractor'), ('clipseg', 'ViTFeatureExtractor'), ('conditional_detr', 'ConditionalDetrFeatureExtractor'), ('convnext', 'ConvNextFeatureExtractor'), ('cvt', 'ConvNextFeatureExtractor'), ('data2vec-audio', 'Wav2Vec2FeatureExtractor'), ('data2vec-vision', 'BeitFeatureExtractor'), ('deformable_detr', 'DeformableDetrFeatureExtractor'), ('deit', 'DeiTFeatureExtractor'), ('detr', 'DetrFeatureExtractor'), ('dinat', 'ViTFeatureExtractor'), ('donut-swin', 'DonutFeatureExtractor'), ('dpt', 'DPTFeatureExtractor'), ('encodec', 'EncodecFeatureExtractor'), ('flava', 'FlavaFeatureExtractor'), ('glpn', 'GLPNFeatureExtractor'), ('groupvit', 'CLIPFeatureExtractor'), ('hubert', 'Wav2Vec2FeatureExtractor'), ('imagegpt', 'ImageGPTFeatureExtractor'), ('layoutlmv2', 'LayoutLMv2FeatureExtractor'), ('layoutlmv3', 'LayoutLMv3FeatureExtractor'), ('levit', 'LevitFeatureExtractor'), ('maskformer', 'MaskFormerFeatureExtractor'), ('mctct', 'MCTCTFeatureExtractor'), ('mobilenet_v1', 'MobileNetV1FeatureExtractor'), ('mobilenet_v2', 'MobileNetV2FeatureExtractor'), ('mobilevit', 'MobileViTFeatureExtractor'), ('nat', 'ViTFeatureExtractor'), ('owlvit', 'OwlViTFeatureExtractor'), ('perceiver', 'PerceiverFeatureExtractor'), ('poolformer', 'PoolFormerFeatureExtractor'), ('regnet', 'ConvNextFeatureExtractor'), ('resnet', 'ConvNextFeatureExtractor'), ('segformer', 'SegformerFeatureExtractor'), ('sew', 'Wav2Vec2FeatureExtractor'), ('sew-d', 'Wav2Vec2FeatureExtractor'), ('speech_to_text', 'Speech2TextFeatureExtractor'), ('speecht5', 'SpeechT5FeatureExtractor'), ('swiftformer', 'ViTFeatureExtractor'), ('swin', 'ViTFeatureExtractor'), ('swinv2', 'ViTFeatureExtractor'), ('table-transformer', 'DetrFeatureExtractor'), ('timesformer', 'VideoMAEFeatureExtractor'), ('tvlt', 'TvltFeatureExtractor'), ('unispeech', 'Wav2Vec2FeatureExtractor'), ('unispeech-sat', 'Wav2Vec2FeatureExtractor'), ('van', 'ConvNextFeatureExtractor'), ('videomae', 'VideoMAEFeatureExtractor'), ('vilt', 'ViltFeatureExtractor'), ('vit', 'ViTFeatureExtractor'), ('vit_mae', 'ViTFeatureExtractor'), ('vit_msn', 'ViTFeatureExtractor'), ('wav2vec2', 'Wav2Vec2FeatureExtractor'), ('wav2vec2-conformer', 'Wav2Vec2FeatureExtractor'), ('wavlm', 'Wav2Vec2FeatureExtractor'), ('whisper', 'WhisperFeatureExtractor'), ('xclip', 'CLIPFeatureExtractor'), ('yolos', 'YolosFeatureExtractor'), ] ) a_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def lowerCamelCase__ ( _a): for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in extractors: SCREAMING_SNAKE_CASE : Optional[int] = model_type_to_module_name(_a) SCREAMING_SNAKE_CASE : Optional[int] = 
importlib.import_module(f".{module_name}" , "transformers.models") try: return getattr(_a , _a) except AttributeError: continue for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(_a , "__name__" , _a) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. SCREAMING_SNAKE_CASE : Tuple = importlib.import_module("transformers") if hasattr(_a , _a): return getattr(_a , _a) return None def lowerCamelCase__ ( _a , _a = None , _a = False , _a = False , _a = None , _a = None , _a = None , _a = False , **_a , ): SCREAMING_SNAKE_CASE : str = get_file_from_repo( _a , _a , cache_dir=_a , force_download=_a , resume_download=_a , proxies=_a , use_auth_token=_a , revision=_a , local_files_only=_a , ) if resolved_config_file is None: logger.info( "Could not locate the feature extractor configuration file, will try to use the model config instead.") return {} with open(_a , encoding="utf-8") as reader: return json.load(_a) class _UpperCamelCase : '''simple docstring''' def __init__( self : str ) -> Optional[Any]: """simple docstring""" raise EnvironmentError( "AutoFeatureExtractor is designed to be instantiated " "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method." ) @classmethod @replace_list_option_in_docstrings(a ) def __UpperCamelCase ( cls : Union[str, Any] , a : Optional[Any] , **a : Optional[int] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = kwargs.pop("config" , a ) SCREAMING_SNAKE_CASE : Optional[int] = kwargs.pop("trust_remote_code" , a ) SCREAMING_SNAKE_CASE : Any = True SCREAMING_SNAKE_CASE : Union[str, Any] = FeatureExtractionMixin.get_feature_extractor_dict(a , **a ) SCREAMING_SNAKE_CASE : Dict = config_dict.get("feature_extractor_type" , a ) SCREAMING_SNAKE_CASE : Dict = None if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ): SCREAMING_SNAKE_CASE : Union[str, Any] = config_dict["""auto_map"""]["""AutoFeatureExtractor"""] # If we don't find the feature extractor class in the feature extractor config, let's try the model config. if feature_extractor_class is None and feature_extractor_auto_map is None: if not isinstance(a , a ): SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(a , **a ) # It could be in `config.feature_extractor_type`` SCREAMING_SNAKE_CASE : List[str] = getattr(a , "feature_extractor_type" , a ) if hasattr(a , "auto_map" ) and "AutoFeatureExtractor" in config.auto_map: SCREAMING_SNAKE_CASE : int = config.auto_map["""AutoFeatureExtractor"""] if feature_extractor_class is not None: SCREAMING_SNAKE_CASE : Tuple = feature_extractor_class_from_name(a ) SCREAMING_SNAKE_CASE : Union[str, Any] = feature_extractor_auto_map is not None SCREAMING_SNAKE_CASE : str = feature_extractor_class is not None or type(a ) in FEATURE_EXTRACTOR_MAPPING SCREAMING_SNAKE_CASE : List[Any] = resolve_trust_remote_code( a , a , a , a ) if has_remote_code and trust_remote_code: SCREAMING_SNAKE_CASE : Tuple = get_class_from_dynamic_module( a , a , **a ) SCREAMING_SNAKE_CASE : Tuple = kwargs.pop("code_revision" , a ) if os.path.isdir(a ): feature_extractor_class.register_for_auto_class() return feature_extractor_class.from_dict(a , **a ) elif feature_extractor_class is not None: return feature_extractor_class.from_dict(a , **a ) # Last try: we use the FEATURE_EXTRACTOR_MAPPING. 
elif type(a ) in FEATURE_EXTRACTOR_MAPPING: SCREAMING_SNAKE_CASE : Optional[int] = FEATURE_EXTRACTOR_MAPPING[type(a )] return feature_extractor_class.from_dict(a , **a ) raise ValueError( F"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a " F"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following " F"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}" ) @staticmethod def __UpperCamelCase ( a : Tuple , a : List[str] ) -> str: """simple docstring""" FEATURE_EXTRACTOR_MAPPING.register(a , a )
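Resolution proceeds in order: feature_extractor_type from the preprocessor config, then an auto_map entry (remote code), then the model config, and finally the lazy FEATURE_EXTRACTOR_MAPPING keyed on config class. A usage sketch of the common entry point; the checkpoint is an arbitrary public example chosen to exercise the wav2vec2 row of the mapping above.

# Sketch of the typical entry point this class provides.
from transformers import AutoFeatureExtractor

fe = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
print(type(fe).__name__)  # resolved via the mapping: Wav2Vec2FeatureExtractor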
25
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class A__ ( unittest.TestCase): def UpperCamelCase__ ( self , __magic_name__ ): for model_result in results.values(): for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ): lowerCamelCase : List[str] = model_result["""result"""][batch_size][sequence_length] self.assertIsNotNone(__magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = """sshleifer/tiny-gpt2""" lowerCamelCase : str = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__magic_name__ , multi_process=__magic_name__ , ) lowerCamelCase : Dict = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : Any = """sgugger/tiny-distilbert-classification""" lowerCamelCase : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , only_pretrain_model=__magic_name__ , ) lowerCamelCase : List[Any] = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2""" lowerCamelCase : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) lowerCamelCase : Any = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : List[Any] = """sshleifer/tiny-gpt2""" lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ ) lowerCamelCase : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__magic_name__ , multi_process=__magic_name__ , ) lowerCamelCase : Optional[Any] = TensorFlowBenchmark(__magic_name__ , [config] ) lowerCamelCase : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = """sshleifer/tiny-gpt2""" lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(__magic_name__ ) lowerCamelCase : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) lowerCamelCase : Union[str, Any] = TensorFlowBenchmark(__magic_name__ , [config] ) lowerCamelCase : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) 
self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2""" lowerCamelCase : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) lowerCamelCase : int = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase__ ( self ): lowerCamelCase : int = """sshleifer/tiny-gpt2""" lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ ) lowerCamelCase : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) lowerCamelCase : Any = TensorFlowBenchmark(__magic_name__ , [config] ) lowerCamelCase : str = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase__ ( self ): lowerCamelCase : str = """patrickvonplaten/t5-tiny-random""" lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ ) lowerCamelCase : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) lowerCamelCase : List[Any] = TensorFlowBenchmark(__magic_name__ , configs=[config] ) lowerCamelCase : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , """Cannot do xla on CPU.""" ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[Any] = """sshleifer/tiny-gpt2""" lowerCamelCase : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__magic_name__ , multi_process=__magic_name__ , ) lowerCamelCase : int = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : str = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2""" with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase : List[str] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=__magic_name__ , save_to_csv=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__magic_name__ , """inf_time.csv""" ) , inference_memory_csv_file=os.path.join(__magic_name__ , """inf_mem.csv""" ) , env_info_csv_file=os.path.join(__magic_name__ , """env.csv""" ) , multi_process=__magic_name__ , ) lowerCamelCase : List[str] = TensorFlowBenchmark(__magic_name__ ) benchmark.run() self.assertTrue(Path(os.path.join(__magic_name__ , """inf_time.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(__magic_name__ , """inf_mem.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(__magic_name__ , """env.csv""" ) ).exists() ) def UpperCamelCase__ ( self ): lowerCamelCase : str = """sshleifer/tiny-gpt2""" def _check_summary_is_not_empty(__magic_name__ ): self.assertTrue(hasattr(__magic_name__ , 
"""sequential""" ) ) self.assertTrue(hasattr(__magic_name__ , """cumulative""" ) ) self.assertTrue(hasattr(__magic_name__ , """current""" ) ) self.assertTrue(hasattr(__magic_name__ , """total""" ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__magic_name__ , """log.txt""" ) , log_print=__magic_name__ , trace_memory_line_by_line=__magic_name__ , eager_mode=__magic_name__ , multi_process=__magic_name__ , ) lowerCamelCase : Tuple = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : Union[str, Any] = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(__magic_name__ , """log.txt""" ) ).exists() )
681
0
'''simple docstring''' import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class A_ ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' _lowerCAmelCase = ["""image_processor""", """tokenizer"""] _lowerCAmelCase = """OwlViTImageProcessor""" _lowerCAmelCase = ("""CLIPTokenizer""", """CLIPTokenizerFast""") def __init__( self , A_=None , A_=None , **A_ ): _UpperCamelCase = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , A_ , ) _UpperCamelCase = kwargs.pop("feature_extractor" ) _UpperCamelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(A_ , A_ ) def __call__( self , A_=None , A_=None , A_=None , A_="max_length" , A_="np" , **A_ ): if text is None and query_images is None and images is None: raise ValueError( "You have to specify at least one text or query image or image. All three cannot be none." ) if text is not None: if isinstance(A_ , A_ ) or (isinstance(A_ , A_ ) and not isinstance(text[0] , A_ )): _UpperCamelCase = [self.tokenizer(A_ , padding=A_ , return_tensors=A_ , **A_ )] elif isinstance(A_ , A_ ) and isinstance(text[0] , A_ ): _UpperCamelCase = [] # Maximum number of queries across batch _UpperCamelCase = max([len(A_ ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(A_ ) != max_num_queries: _UpperCamelCase = t + [""" """] * (max_num_queries - len(A_ )) _UpperCamelCase = self.tokenizer(A_ , padding=A_ , return_tensors=A_ , **A_ ) encodings.append(A_ ) else: raise TypeError("Input text should be a string, a list of strings or a nested list of strings" ) if return_tensors == "np": _UpperCamelCase = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) _UpperCamelCase = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp _UpperCamelCase = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) _UpperCamelCase = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch _UpperCamelCase = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 ) _UpperCamelCase = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf _UpperCamelCase = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 ) _UpperCamelCase = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 ) else: raise ValueError("Target return tensor type could not be returned" ) _UpperCamelCase = BatchEncoding() _UpperCamelCase = input_ids _UpperCamelCase = attention_mask if query_images is not None: _UpperCamelCase = BatchEncoding() _UpperCamelCase = self.image_processor( A_ , return_tensors=A_ , **A_ ).pixel_values _UpperCamelCase = query_pixel_values if images is not None: _UpperCamelCase = self.image_processor(A_ , return_tensors=A_ , **A_ ) if text is not None and images is not None: _UpperCamelCase = 
image_features.pixel_values return encoding elif query_images is not None and images is not None: _UpperCamelCase = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**A_ ) , tensor_type=A_ ) def a ( self , *A_ , **A_ ): return self.image_processor.post_process(*A_ , **A_ ) def a ( self , *A_ , **A_ ): return self.image_processor.post_process_object_detection(*A_ , **A_ ) def a ( self , *A_ , **A_ ): return self.image_processor.post_process_image_guided_detection(*A_ , **A_ ) def a ( self , *A_ , **A_ ): return self.tokenizer.batch_decode(*A_ , **A_ ) def a ( self , *A_ , **A_ ): return self.tokenizer.decode(*A_ , **A_ ) @property def a ( self ): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , A_ , ) return self.image_processor_class @property def a ( self ): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , A_ , ) return self.image_processor
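A notable detail in __call__ is the nested-query path: a ragged list of per-image query lists is padded with " " up to the batch-wide maximum before the per-image encodings are stacked. A sketch of that path; the checkpoint name is the standard public OwlViT release, assumed here for illustration, and the printed shape is inferred from the concatenation logic above.

# Sketch of the ragged text-query padding handled by the processor.
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
texts = [["a cat", "a dog"], ["a remote control"]]  # ragged: padded to 2 queries each
inputs = processor(text=texts, return_tensors="np")
print(inputs["input_ids"].shape)  # (num_images * max_queries, seq_len)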
138
import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def _a ( lowerCamelCase ): return x + 2 class A__ ( unittest.TestCase): def UpperCamelCase__ ( self ): lowerCamelCase : List[Any] = """x = 3""" lowerCamelCase : Tuple = {} lowerCamelCase : List[str] = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result == 3 self.assertDictEqual(__magic_name__ , {"""x""": 3} ) lowerCamelCase : Optional[int] = """x = y""" lowerCamelCase : Tuple = {"""y""": 5} lowerCamelCase : Tuple = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 5, """y""": 5} ) def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = """y = add_two(x)""" lowerCamelCase : List[Any] = {"""x""": 3} lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} ) # Won't work without the tool with CaptureStdout() as out: lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result is None assert "tried to execute add_two" in out.out def UpperCamelCase__ ( self ): lowerCamelCase : int = """x = 3""" lowerCamelCase : Dict = {} lowerCamelCase : Tuple = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result == 3 self.assertDictEqual(__magic_name__ , {"""x""": 3} ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[Any] = """test_dict = {'x': x, 'y': add_two(x)}""" lowerCamelCase : Optional[int] = {"""x""": 3} lowerCamelCase : Tuple = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} ) self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = """x = 3\ny = 5""" lowerCamelCase : Optional[int] = {} lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = """text = f'This is x: {x}.'""" lowerCamelCase : Optional[int] = {"""x""": 3} lowerCamelCase : Optional[int] = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(__magic_name__ , {"""x""": 3, """text""": """This is x: 3."""} ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = """if x <= 3:\n y = 2\nelse:\n y = 5""" lowerCamelCase : Tuple = {"""x""": 3} lowerCamelCase : int = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 2} ) lowerCamelCase : Tuple = {"""x""": 8} lowerCamelCase : Dict = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. 
assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 8, """y""": 5} ) def UpperCamelCase__ ( self ): lowerCamelCase : Dict = """test_list = [x, add_two(x)]""" lowerCamelCase : List[Any] = {"""x""": 3} lowerCamelCase : List[str] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) self.assertListEqual(__magic_name__ , [3, 5] ) self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_list""": [3, 5]} ) def UpperCamelCase__ ( self ): lowerCamelCase : str = """y = x""" lowerCamelCase : List[Any] = {"""x""": 3} lowerCamelCase : Any = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result == 3 self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 3} ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = """test_list = [x, add_two(x)]\ntest_list[1]""" lowerCamelCase : Any = {"""x""": 3} lowerCamelCase : List[str] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_list""": [3, 5]} ) lowerCamelCase : Any = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']""" lowerCamelCase : Dict = {"""x""": 3} lowerCamelCase : Any = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} ) def UpperCamelCase__ ( self ): lowerCamelCase : Union[str, Any] = """x = 0\nfor i in range(3):\n x = i""" lowerCamelCase : int = {} lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {"""range""": range} , state=__magic_name__ ) assert result == 2 self.assertDictEqual(__magic_name__ , {"""x""": 2, """i""": 2} )
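The evaluate interpreter walks a restricted Python AST against a whitelist of tools and a mutable state dict, returning the value of the last statement. A direct-call sketch limited to constructs the tests above prove are supported (assignment, calls, lists, subscripts):

# Sketch of calling the restricted interpreter directly, mirroring the tests.
from transformers.tools.python_interpreter import evaluate

def add_two(x):
    return x + 2

state = {"x": 3}
result = evaluate(
    "y = add_two(x)\ntest_list = [x, y]\ntest_list[1]",
    {"add_two": add_two},
    state=state,
)
print(result, state)  # 5 {'x': 3, 'y': 5, 'test_list': [3, 5]}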
681
0
"""simple docstring""" import absl # noqa: F401 # Here to have a nice missing dependency error message early on import nltk # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import six # noqa: F401 # Here to have a nice missing dependency error message early on from rouge_score import rouge_scorer, scoring import datasets A_ : Optional[Any] = '\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n' A_ : str = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n' A_ : List[Any] = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. 
`\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): '''simple docstring''' def _lowerCAmelCase ( self : str ) -> Union[str, Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[ 'https://en.wikipedia.org/wiki/ROUGE_(metric)', 'https://github.com/google-research/google-research/tree/master/rouge', ] , ) def _lowerCAmelCase ( self : Optional[int] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[Any]=None , _SCREAMING_SNAKE_CASE : Dict=True , _SCREAMING_SNAKE_CASE : Optional[int]=False ) -> Optional[Any]: """simple docstring""" if rouge_types is None: SCREAMING_SNAKE_CASE : List[Any] = ["""rouge1""", """rouge2""", """rougeL""", """rougeLsum"""] SCREAMING_SNAKE_CASE : str = rouge_scorer.RougeScorer(rouge_types=_SCREAMING_SNAKE_CASE , use_stemmer=_SCREAMING_SNAKE_CASE ) if use_aggregator: SCREAMING_SNAKE_CASE : Any = scoring.BootstrapAggregator() else: SCREAMING_SNAKE_CASE : List[Any] = [] for ref, pred in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE : List[Any] = scorer.score(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if use_aggregator: aggregator.add_scores(_SCREAMING_SNAKE_CASE ) else: scores.append(_SCREAMING_SNAKE_CASE ) if use_aggregator: SCREAMING_SNAKE_CASE : Optional[int] = aggregator.aggregate() else: SCREAMING_SNAKE_CASE : int = {} for key in scores[0]: SCREAMING_SNAKE_CASE : Tuple = [score[key] for score in scores] return result
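With use_aggregator=False the metric skips the bootstrap aggregator and returns one Score tuple per prediction/reference pair, a path the docstring example does not show. A sketch, reusing the docstring's load_metric style:

# Sketch of the non-aggregated path: per-example Score tuples instead of
# bootstrap confidence intervals.
import datasets

rouge = datasets.load_metric("rouge")
results = rouge.compute(
    predictions=["hello there", "general kenobi"],
    references=["hello there", "general kenobi"],
    use_aggregator=False,
)
print(results["rouge1"])  # list of Score(precision, recall, fmeasure), one per pair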
265
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}

class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
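A short sketch of instantiating the config with a non-default state/action space; the dimensions are illustration values (the Hopper environment, for instance, has an 11-dimensional state and 3-dimensional actions).

# Sketch: a config for an 11-dimensional state / 3-dimensional action space.
from transformers import DecisionTransformerConfig, DecisionTransformerModel

config = DecisionTransformerConfig(state_dim=11, act_dim=3, max_ep_len=1000)
model = DecisionTransformerModel(config)
print(config.state_dim, config.act_dim, config.hidden_size)  # 11 3 128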
681
0
"""simple docstring""" from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ..utils import maybe_allow_in_graph from .activations import get_activation from .attention_processor import Attention from .embeddings import CombinedTimestepLabelEmbeddings @maybe_allow_in_graph class __UpperCAmelCase (nn.Module ): '''simple docstring''' def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=0.0 , snake_case_ = None , snake_case_ = "geglu" , snake_case_ = None , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = True , snake_case_ = "layer_norm" , snake_case_ = False , ): '''simple docstring''' super().__init__() A__ : Optional[Any] = only_cross_attention A__ : Dict = (num_embeds_ada_norm is not None) and norm_type == """ada_norm_zero""" A__ : List[Any] = (num_embeds_ada_norm is not None) and norm_type == """ada_norm""" if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( F'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to''' F''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' ) # Define 3 blocks. Each block has its own normalization layer. # 1. Self-Attn if self.use_ada_layer_norm: A__ : Dict = AdaLayerNorm(snake_case_ , snake_case_ ) elif self.use_ada_layer_norm_zero: A__ : List[Any] = AdaLayerNormZero(snake_case_ , snake_case_ ) else: A__ : Optional[Any] = nn.LayerNorm(snake_case_ , elementwise_affine=snake_case_ ) A__ : Tuple = Attention( query_dim=snake_case_ , heads=snake_case_ , dim_head=snake_case_ , dropout=snake_case_ , bias=snake_case_ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=snake_case_ , ) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. A__ : List[Any] = ( AdaLayerNorm(snake_case_ , snake_case_ ) if self.use_ada_layer_norm else nn.LayerNorm(snake_case_ , elementwise_affine=snake_case_ ) ) A__ : str = Attention( query_dim=snake_case_ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=snake_case_ , dim_head=snake_case_ , dropout=snake_case_ , bias=snake_case_ , upcast_attention=snake_case_ , ) # is self-attn if encoder_hidden_states is none else: A__ : Union[str, Any] = None A__ : int = None # 3. 
Feed-forward A__ : Tuple = nn.LayerNorm(snake_case_ , elementwise_affine=snake_case_ ) A__ : str = FeedForward(snake_case_ , dropout=snake_case_ , activation_fn=snake_case_ , final_dropout=snake_case_ ) # let chunk size default to None A__ : Union[str, Any] = None A__ : Optional[int] = 0 def lowerCamelCase ( self , snake_case_ , snake_case_ ): '''simple docstring''' A__ : List[Any] = chunk_size A__ : Optional[int] = dim def lowerCamelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , ): '''simple docstring''' if self.use_ada_layer_norm: A__ : Union[str, Any] = self.norma(snake_case_ , snake_case_ ) elif self.use_ada_layer_norm_zero: A__ : int = self.norma( snake_case_ , snake_case_ , snake_case_ , hidden_dtype=hidden_states.dtype ) else: A__ : List[Any] = self.norma(snake_case_ ) A__ : str = cross_attention_kwargs if cross_attention_kwargs is not None else {} A__ : List[Any] = self.attna( snake_case_ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=snake_case_ , **snake_case_ , ) if self.use_ada_layer_norm_zero: A__ : List[str] = gate_msa.unsqueeze(1 ) * attn_output A__ : List[str] = attn_output + hidden_states # 2. Cross-Attention if self.attna is not None: A__ : Union[str, Any] = ( self.norma(snake_case_ , snake_case_ ) if self.use_ada_layer_norm else self.norma(snake_case_ ) ) A__ : List[str] = self.attna( snake_case_ , encoder_hidden_states=snake_case_ , attention_mask=snake_case_ , **snake_case_ , ) A__ : Optional[Any] = attn_output + hidden_states # 3. Feed-forward A__ : Any = self.norma(snake_case_ ) if self.use_ada_layer_norm_zero: A__ : List[Any] = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0: raise ValueError( F'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. 
Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' ) A__ : Union[str, Any] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size A__ : Dict = torch.cat( [self.ff(snake_case_ ) for hid_slice in norm_hidden_states.chunk(snake_case_ , dim=self._chunk_dim )] , dim=self._chunk_dim , ) else: A__ : Union[str, Any] = self.ff(snake_case_ ) if self.use_ada_layer_norm_zero: A__ : str = gate_mlp.unsqueeze(1 ) * ff_output A__ : List[Any] = ff_output + hidden_states return hidden_states class __UpperCAmelCase (nn.Module ): '''simple docstring''' def __init__( self , snake_case_ , snake_case_ = None , snake_case_ = 4 , snake_case_ = 0.0 , snake_case_ = "geglu" , snake_case_ = False , ): '''simple docstring''' super().__init__() A__ : Dict = int(dim * mult ) A__ : Dict = dim_out if dim_out is not None else dim if activation_fn == "gelu": A__ : List[Any] = GELU(snake_case_ , snake_case_ ) if activation_fn == "gelu-approximate": A__ : Dict = GELU(snake_case_ , snake_case_ , approximate="""tanh""" ) elif activation_fn == "geglu": A__ : Tuple = GEGLU(snake_case_ , snake_case_ ) elif activation_fn == "geglu-approximate": A__ : List[str] = ApproximateGELU(snake_case_ , snake_case_ ) A__ : List[Any] = nn.ModuleList([] ) # project in self.net.append(snake_case_ ) # project dropout self.net.append(nn.Dropout(snake_case_ ) ) # project out self.net.append(nn.Linear(snake_case_ , snake_case_ ) ) # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout if final_dropout: self.net.append(nn.Dropout(snake_case_ ) ) def lowerCamelCase ( self , snake_case_ ): '''simple docstring''' for module in self.net: A__ : Optional[Any] = module(snake_case_ ) return hidden_states class __UpperCAmelCase (nn.Module ): '''simple docstring''' def __init__( self , snake_case_ , snake_case_ , snake_case_ = "none" ): '''simple docstring''' super().__init__() A__ : List[str] = nn.Linear(snake_case_ , snake_case_ ) A__ : Union[str, Any] = approximate def lowerCamelCase ( self , snake_case_ ): '''simple docstring''' if gate.device.type != "mps": return F.gelu(snake_case_ , approximate=self.approximate ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype ) def lowerCamelCase ( self , snake_case_ ): '''simple docstring''' A__ : Optional[int] = self.proj(snake_case_ ) A__ : List[str] = self.gelu(snake_case_ ) return hidden_states class __UpperCAmelCase (nn.Module ): '''simple docstring''' def __init__( self , snake_case_ , snake_case_ ): '''simple docstring''' super().__init__() A__ : Any = nn.Linear(snake_case_ , dim_out * 2 ) def lowerCamelCase ( self , snake_case_ ): '''simple docstring''' if gate.device.type != "mps": return F.gelu(snake_case_ ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype ) def lowerCamelCase ( self , snake_case_ ): '''simple docstring''' A__ : List[Any] = self.proj(snake_case_ ).chunk(2 , dim=-1 ) return hidden_states * self.gelu(snake_case_ ) class __UpperCAmelCase (nn.Module ): '''simple docstring''' def __init__( self , snake_case_ , snake_case_ ): '''simple docstring''' super().__init__() A__ : Dict = nn.Linear(snake_case_ , snake_case_ ) def lowerCamelCase ( self , snake_case_ ): '''simple docstring''' A__ : Any = self.proj(snake_case_ ) return x * torch.sigmoid(1.7_02 * x ) class __UpperCAmelCase (nn.Module ): '''simple docstring''' def __init__( self , snake_case_ , snake_case_ ): '''simple 
docstring''' super().__init__() A__ : Optional[int] = nn.Embedding(snake_case_ , snake_case_ ) A__ : Union[str, Any] = nn.SiLU() A__ : List[Any] = nn.Linear(snake_case_ , embedding_dim * 2 ) A__ : Tuple = nn.LayerNorm(snake_case_ , elementwise_affine=snake_case_ ) def lowerCamelCase ( self , snake_case_ , snake_case_ ): '''simple docstring''' A__ : Union[str, Any] = self.linear(self.silu(self.emb(snake_case_ ) ) ) A__ : Any = torch.chunk(snake_case_ , 2 ) A__ : Any = self.norm(snake_case_ ) * (1 + scale) + shift return x class __UpperCAmelCase (nn.Module ): '''simple docstring''' def __init__( self , snake_case_ , snake_case_ ): '''simple docstring''' super().__init__() A__ : Any = CombinedTimestepLabelEmbeddings(snake_case_ , snake_case_ ) A__ : Any = nn.SiLU() A__ : Optional[int] = nn.Linear(snake_case_ , 6 * embedding_dim , bias=snake_case_ ) A__ : Optional[Any] = nn.LayerNorm(snake_case_ , elementwise_affine=snake_case_ , eps=1E-6 ) def lowerCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ): '''simple docstring''' A__ : str = self.linear(self.silu(self.emb(snake_case_ , snake_case_ , hidden_dtype=snake_case_ ) ) ) A__ : Optional[int] = emb.chunk(6 , dim=1 ) A__ : Any = self.norm(snake_case_ ) * (1 + scale_msa[:, None]) + shift_msa[:, None] return x, gate_msa, shift_mlp, scale_mlp, gate_mlp class __UpperCAmelCase (nn.Module ): '''simple docstring''' def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = None , snake_case_ = 1E-5 ): '''simple docstring''' super().__init__() A__ : Optional[Any] = num_groups A__ : Any = eps if act_fn is None: A__ : List[str] = None else: A__ : str = get_activation(snake_case_ ) A__ : Union[str, Any] = nn.Linear(snake_case_ , out_dim * 2 ) def lowerCamelCase ( self , snake_case_ , snake_case_ ): '''simple docstring''' if self.act: A__ : List[str] = self.act(snake_case_ ) A__ : Tuple = self.linear(snake_case_ ) A__ : Optional[int] = emb[:, :, None, None] A__ : int = emb.chunk(2 , dim=1 ) A__ : Tuple = F.group_norm(snake_case_ , self.num_groups , eps=self.eps ) A__ : int = x * (1 + scale) + shift return x
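The class names in the file above are mangled by this dump, so here is a self-contained sketch (my own, not the file's API) of the "geglu" feed-forward path the transformer block uses: project to twice the width, split, and gate one half with the GELU of the other.

import torch
import torch.nn.functional as F
from torch import nn


class MiniGEGLU(nn.Module):
    """Minimal GEGLU: Linear to 2x width, then elementwise gate with GELU."""

    def __init__(self, dim: int, dim_out: int) -> None:
        super().__init__()
        self.proj = nn.Linear(dim, dim_out * 2)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        hidden, gate = self.proj(x).chunk(2, dim=-1)
        return hidden * F.gelu(gate)


x = torch.randn(2, 16, 64)
print(MiniGEGLU(64, 128)(x).shape)  # torch.Size([2, 16, 128])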
import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig _lowerCamelCase =logging.get_logger(__name__) class A__ : def __init__( self , __magic_name__ , __magic_name__ ): lowerCamelCase : Any = question_encoder lowerCamelCase : Dict = generator lowerCamelCase : Tuple = self.question_encoder def UpperCamelCase__ ( self , __magic_name__ ): if os.path.isfile(__magic_name__ ): raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' ) os.makedirs(__magic_name__ , exist_ok=__magic_name__ ) lowerCamelCase : Any = os.path.join(__magic_name__ , """question_encoder_tokenizer""" ) lowerCamelCase : str = os.path.join(__magic_name__ , """generator_tokenizer""" ) self.question_encoder.save_pretrained(__magic_name__ ) self.generator.save_pretrained(__magic_name__ ) @classmethod def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ): # dynamically import AutoTokenizer from ..auto.tokenization_auto import AutoTokenizer lowerCamelCase : Any = kwargs.pop("""config""" , __magic_name__ ) if config is None: lowerCamelCase : Tuple = RagConfig.from_pretrained(__magic_name__ ) lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained( __magic_name__ , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" ) lowerCamelCase : Any = AutoTokenizer.from_pretrained( __magic_name__ , config=config.generator , subfolder="""generator_tokenizer""" ) return cls(question_encoder=__magic_name__ , generator=__magic_name__ ) def __call__( self , *__magic_name__ , **__magic_name__ ): return self.current_tokenizer(*__magic_name__ , **__magic_name__ ) def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ): return self.generator.batch_decode(*__magic_name__ , **__magic_name__ ) def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ): return self.generator.decode(*__magic_name__ , **__magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : Union[str, Any] = self.question_encoder def UpperCamelCase__ ( self ): lowerCamelCase : str = self.generator def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "longest" , __magic_name__ = None , __magic_name__ = True , **__magic_name__ , ): warnings.warn( """`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """ """regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """ """context manager to prepare your targets. See the documentation of your specific tokenizer for more """ """details""" , __magic_name__ , ) if max_length is None: lowerCamelCase : int = self.current_tokenizer.model_max_length lowerCamelCase : int = self( __magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , max_length=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: lowerCamelCase : int = self.current_tokenizer.model_max_length lowerCamelCase : Dict = self( text_target=__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , padding=__magic_name__ , max_length=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , ) lowerCamelCase : List[Any] = labels["""input_ids"""] return model_inputs
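A hedged usage sketch: upstream this wrapper is `RagTokenizer`, which routes `__call__` to the question-encoder tokenizer and `decode`/`batch_decode` to the generator tokenizer (the class name is mangled above; the checkpoint below is illustrative).

from transformers import RagTokenizer

tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")  # illustrative checkpoint
inputs = tokenizer("How many people live in Paris?", return_tensors="pt")
print(inputs["input_ids"].shape)  # encoded with the question-encoder tokenizer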
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (Project Euler problem 20)."""
    return sum(int(digit) for digit in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
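Quick sanity check (my own example) for the function above: 10! = 3628800, whose digits sum to 27.

assert solution(10) == 27  # 3 + 6 + 2 + 8 + 8 + 0 + 0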
import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def _a ( lowerCamelCase, lowerCamelCase ): lowerCamelCase : List[Any] = F'''{sampling_rate}''' lowerCamelCase : Optional[int] = """1""" lowerCamelCase : Any = """f32le""" lowerCamelCase : Any = [ """ffmpeg""", """-i""", """pipe:0""", """-ac""", ac, """-ar""", ar, """-f""", format_for_conversion, """-hide_banner""", """-loglevel""", """quiet""", """pipe:1""", ] try: with subprocess.Popen(lowerCamelCase, stdin=subprocess.PIPE, stdout=subprocess.PIPE ) as ffmpeg_process: lowerCamelCase : Optional[int] = ffmpeg_process.communicate(lowerCamelCase ) except FileNotFoundError as error: raise ValueError("""ffmpeg was not found but is required to load audio files from filename""" ) from error lowerCamelCase : Union[str, Any] = output_stream[0] lowerCamelCase : Optional[Any] = np.frombuffer(lowerCamelCase, np.floataa ) if audio.shape[0] == 0: raise ValueError("""Malformed soundfile""" ) return audio def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase = "f32le", ): lowerCamelCase : Dict = F'''{sampling_rate}''' lowerCamelCase : List[Any] = """1""" if format_for_conversion == "s16le": lowerCamelCase : Any = 2 elif format_for_conversion == "f32le": lowerCamelCase : Dict = 4 else: raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' ) lowerCamelCase : Dict = platform.system() if system == "Linux": lowerCamelCase : Union[str, Any] = """alsa""" lowerCamelCase : List[Any] = """default""" elif system == "Darwin": lowerCamelCase : List[Any] = """avfoundation""" lowerCamelCase : List[Any] = """:0""" elif system == "Windows": lowerCamelCase : int = """dshow""" lowerCamelCase : Any = """default""" lowerCamelCase : Any = [ """ffmpeg""", """-f""", format_, """-i""", input_, """-ac""", ac, """-ar""", ar, """-f""", format_for_conversion, """-fflags""", """nobuffer""", """-hide_banner""", """-loglevel""", """quiet""", """pipe:1""", ] lowerCamelCase : List[Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample lowerCamelCase : Any = _ffmpeg_stream(lowerCamelCase, lowerCamelCase ) for item in iterator: yield item def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = "f32le", ): if stream_chunk_s is not None: lowerCamelCase : int = stream_chunk_s else: lowerCamelCase : Dict = chunk_length_s lowerCamelCase : Optional[Any] = ffmpeg_microphone(lowerCamelCase, lowerCamelCase, format_for_conversion=lowerCamelCase ) if format_for_conversion == "s16le": lowerCamelCase : Optional[int] = np.intaa lowerCamelCase : Optional[Any] = 2 elif format_for_conversion == "f32le": lowerCamelCase : int = np.floataa lowerCamelCase : Any = 4 else: raise ValueError(F'''Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`''' ) if stride_length_s is None: lowerCamelCase : Any = chunk_length_s / 6 lowerCamelCase : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(lowerCamelCase, (int, float) ): lowerCamelCase : Optional[int] = [stride_length_s, stride_length_s] lowerCamelCase : Any = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample lowerCamelCase : Optional[int] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample lowerCamelCase : List[Any] = datetime.datetime.now() lowerCamelCase : List[Any] = datetime.timedelta(seconds=lowerCamelCase ) for item in chunk_bytes_iter(lowerCamelCase, lowerCamelCase, stride=(stride_left, stride_right), stream=lowerCamelCase ): # Put everything back in numpy scale lowerCamelCase : Dict = np.frombuffer(item["""raw"""], dtype=lowerCamelCase ) lowerCamelCase : List[Any] = ( item["""stride"""][0] // size_of_sample, item["""stride"""][1] // size_of_sample, ) lowerCamelCase : Tuple = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! SKIP continue yield item def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = False ): lowerCamelCase : Optional[int] = B"""""" lowerCamelCase , lowerCamelCase : str = stride if stride_left + stride_right >= chunk_len: raise ValueError( F'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' ) lowerCamelCase : str = 0 for raw in iterator: acc += raw if stream and len(lowerCamelCase ) < chunk_len: lowerCamelCase : Optional[int] = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(lowerCamelCase ) >= chunk_len: # We are flushing the accumulator lowerCamelCase : str = (_stride_left, stride_right) lowerCamelCase : Dict = {"""raw""": acc[:chunk_len], """stride""": stride} if stream: lowerCamelCase : Optional[int] = False yield item lowerCamelCase : str = stride_left lowerCamelCase : Tuple = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(lowerCamelCase ) > stride_left: lowerCamelCase : List[str] = {"""raw""": acc, """stride""": (_stride_left, 0)} if stream: lowerCamelCase : List[Any] = False yield item def _a ( lowerCamelCase, lowerCamelCase ): lowerCamelCase : Optional[int] = 2**24 # 16Mo try: with subprocess.Popen(lowerCamelCase, stdout=subprocess.PIPE, bufsize=lowerCamelCase ) as ffmpeg_process: while True: lowerCamelCase : Any = ffmpeg_process.stdout.read(lowerCamelCase ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError("""ffmpeg was not found but is required to stream audio files from filename""" ) from error
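The helper names above are mangled, so here is a standalone sketch (my own) of the striding scheme `chunk_bytes_iter` implements: every yielded chunk records (stride_left, stride_right) so a consumer can discard the overlapping context after inference, and only the tail chunk is allowed to be short.

def chunk_with_stride(raw_chunks, chunk_len, stride):
    stride_left, stride_right = stride
    acc = b""
    _stride_left = 0  # the very first chunk has no left context
    for raw in raw_chunks:
        acc += raw
        while len(acc) >= chunk_len:
            yield {"raw": acc[:chunk_len], "stride": (_stride_left, stride_right)}
            _stride_left = stride_left
            acc = acc[chunk_len - stride_left - stride_right :]
    if len(acc) > stride_left:  # flush whatever is left
        yield {"raw": acc, "stride": (_stride_left, 0)}


stream = (bytes(range(i, i + 4)) for i in range(0, 20, 4))
for item in chunk_with_stride(stream, chunk_len=8, stride=(2, 2)):
    print(len(item["raw"]), item["stride"])  # 8 (0, 2) / 8 (2, 2) / 8 (2, 2) / 4 (2, 0)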
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...feature_extraction_utils import FeatureExtractionMixin from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType, logging snake_case_ = logging.get_logger(__name__) snake_case_ = { """deepmind/language-perceiver""": """https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json""", # See all Perceiver models at https://huggingface.co/models?filter=perceiver } class a__ ( __SCREAMING_SNAKE_CASE ): __magic_name__ : Union[str, Any] = """perceiver""" def __init__(self : Optional[int], __UpperCAmelCase : Any=256, __UpperCAmelCase : Optional[Any]=1280, __UpperCAmelCase : str=768, __UpperCAmelCase : Tuple=1, __UpperCAmelCase : Union[str, Any]=26, __UpperCAmelCase : Optional[Any]=8, __UpperCAmelCase : List[Any]=8, __UpperCAmelCase : List[str]=None, __UpperCAmelCase : Optional[int]=None, __UpperCAmelCase : str="kv", __UpperCAmelCase : List[Any]=1, __UpperCAmelCase : Union[str, Any]=1, __UpperCAmelCase : str="gelu", __UpperCAmelCase : Union[str, Any]=0.1, __UpperCAmelCase : List[Any]=0.02, __UpperCAmelCase : List[str]=1e-12, __UpperCAmelCase : List[str]=True, __UpperCAmelCase : Any=262, __UpperCAmelCase : Optional[int]=2048, __UpperCAmelCase : Optional[Any]=56, __UpperCAmelCase : Optional[int]=[368, 496], __UpperCAmelCase : Dict=16, __UpperCAmelCase : Tuple=1920, __UpperCAmelCase : Tuple=16, __UpperCAmelCase : Union[str, Any]=[1, 16, 224, 224], **__UpperCAmelCase : Any, ) -> Any: """simple docstring""" super().__init__(**__UpperCAmelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = num_latents SCREAMING_SNAKE_CASE : str = d_latents SCREAMING_SNAKE_CASE : Optional[Any] = d_model SCREAMING_SNAKE_CASE : Any = num_blocks SCREAMING_SNAKE_CASE : Union[str, Any] = num_self_attends_per_block SCREAMING_SNAKE_CASE : Optional[int] = num_self_attention_heads SCREAMING_SNAKE_CASE : int = num_cross_attention_heads SCREAMING_SNAKE_CASE : List[str] = qk_channels SCREAMING_SNAKE_CASE : Any = v_channels SCREAMING_SNAKE_CASE : List[Any] = cross_attention_shape_for_attention SCREAMING_SNAKE_CASE : Union[str, Any] = self_attention_widening_factor SCREAMING_SNAKE_CASE : int = cross_attention_widening_factor SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range SCREAMING_SNAKE_CASE : Optional[Any] = layer_norm_eps SCREAMING_SNAKE_CASE : Union[str, Any] = use_query_residual # masked language modeling attributes SCREAMING_SNAKE_CASE : Dict = vocab_size SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings # image classification attributes SCREAMING_SNAKE_CASE : Optional[Any] = image_size # flow attributes SCREAMING_SNAKE_CASE : Tuple = train_size # multimodal autoencoding attributes SCREAMING_SNAKE_CASE : Tuple = num_frames SCREAMING_SNAKE_CASE : Optional[int] = audio_samples_per_frame SCREAMING_SNAKE_CASE : List[Any] = samples_per_patch SCREAMING_SNAKE_CASE : str = output_shape class a__ ( __SCREAMING_SNAKE_CASE ): @property def lowercase__ (self : Tuple ) -> Tuple: """simple docstring""" if self.task == "multiple-choice": SCREAMING_SNAKE_CASE : int = {0: """batch""", 1: """choice""", 2: """sequence"""} else: SCREAMING_SNAKE_CASE : Union[str, Any] = {0: """batch""", 1: """sequence"""} 
return OrderedDict( [ ('''inputs''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] ) @property def lowercase__ (self : List[Any] ) -> List[Any]: """simple docstring""" return 1e-4 def lowercase__ (self : List[Any], __UpperCAmelCase : int, __UpperCAmelCase : List[Any] = -1, __UpperCAmelCase : Optional[Any] = -1, __UpperCAmelCase : int = -1, __UpperCAmelCase : List[str] = False, __UpperCAmelCase : str = None, __UpperCAmelCase : Optional[int] = 3, __UpperCAmelCase : Any = 40, __UpperCAmelCase : Union[str, Any] = 40, ) -> Optional[int]: """simple docstring""" if isinstance(__UpperCAmelCase, __UpperCAmelCase ): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX SCREAMING_SNAKE_CASE : Dict = compute_effective_axis_dimension( __UpperCAmelCase, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX SCREAMING_SNAKE_CASE : Any = preprocessor.num_special_tokens_to_add(__UpperCAmelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = compute_effective_axis_dimension( __UpperCAmelCase, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=__UpperCAmelCase ) # Generate dummy inputs according to compute batch and sequence SCREAMING_SNAKE_CASE : Optional[int] = [""" """.join(['''a'''] ) * seq_length] * batch_size SCREAMING_SNAKE_CASE : Optional[int] = dict(preprocessor(__UpperCAmelCase, return_tensors=__UpperCAmelCase ) ) SCREAMING_SNAKE_CASE : List[str] = inputs.pop('''input_ids''' ) return inputs elif isinstance(__UpperCAmelCase, __UpperCAmelCase ) and preprocessor.model_input_names[0] == "pixel_values": # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX SCREAMING_SNAKE_CASE : List[str] = compute_effective_axis_dimension(__UpperCAmelCase, fixed_dimension=OnnxConfig.default_fixed_batch ) SCREAMING_SNAKE_CASE : Any = self._generate_dummy_images(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) SCREAMING_SNAKE_CASE : List[Any] = dict(preprocessor(images=__UpperCAmelCase, return_tensors=__UpperCAmelCase ) ) SCREAMING_SNAKE_CASE : Optional[int] = inputs.pop('''pixel_values''' ) return inputs else: raise ValueError( '''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''' )
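A hedged usage sketch: the first class above corresponds to `transformers.PerceiverConfig` (class names are mangled in this dump), and the keyword defaults in `__init__` match the upstream ones.

from transformers import PerceiverConfig

config = PerceiverConfig(num_latents=256, d_latents=1280)
print(config.num_self_attends_per_block)  # 26, per the defaults above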
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , ) @pytest.mark.usefixtures("""sm_env""") @parameterized_class( [ { """framework""": """pytorch""", """script""": """run_glue_model_parallelism.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, { """framework""": """pytorch""", """script""": """run_glue.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, ]) class A__ ( unittest.TestCase): def UpperCamelCase__ ( self ): if self.framework == "pytorch": subprocess.run( F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="""utf-8""" , check=__magic_name__ , ) assert hasattr(self , """env""" ) def UpperCamelCase__ ( self , __magic_name__ ): # configuration for running training on smdistributed Model Parallel lowerCamelCase : Any = { """enabled""": True, """processes_per_host""": 8, } lowerCamelCase : Any = { """enabled""": True, """parameters""": { """microbatches""": 4, """placement_strategy""": """spread""", """pipeline""": """interleaved""", """optimize""": """speed""", """partitions""": 4, """ddp""": True, }, } lowerCamelCase : Optional[Any] = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options} lowerCamelCase : Dict = """trainer""" if self.script == """run_glue.py""" else """smtrainer""" # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=__magic_name__ , instance_type=self.instance_type , debugger_hook_config=__magic_name__ , hyperparameters={ **self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path, """max_steps""": 5_0_0, } , metric_definitions=self.env.metric_definitions , distribution=__magic_name__ , py_version="""py36""" , ) def UpperCamelCase__ ( self , __magic_name__ ): TrainingJobAnalytics(__magic_name__ ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' ) @parameterized.expand([(1,)] ) def UpperCamelCase__ ( self , __magic_name__ ): # create estimator lowerCamelCase : int = self.create_estimator(__magic_name__ ) # run training estimator.fit() # result dataframe lowerCamelCase : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis lowerCamelCase : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) lowerCamelCase : int = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping lowerCamelCase : int = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 
9_9_9_9_9_9 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(F'''{estimator.latest_training_job.name}.json''' , """w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , __magic_name__ )
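A small worked illustration (my own toy values) of the KPI checks at the end of the test: the runtime must stay within the budget from the parameterized `results` dict, every logged accuracy must reach it, and every logged loss must stay under it.

results = {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}  # from the class params above
train_runtime, eval_accuracy, eval_loss = 1500, [0.31, 0.35], [1.1, 0.9]  # toy job metrics
assert train_runtime <= results["train_runtime"]
assert all(acc >= results["eval_accuracy"] for acc in eval_accuracy)
assert all(loss <= results["eval_loss"] for loss in eval_loss)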
"""simple docstring""" from math import factorial, pi def _lowerCAmelCase(a : Any , a : int = 30 ) -> Any: if not isinstance(a , (int, float) ): raise ValueError('''maclaurin_sin() requires either an int or float for theta''' ) if not isinstance(a , a ) or accuracy <= 0: raise ValueError('''maclaurin_sin() requires a positive int for accuracy''' ) _SCREAMING_SNAKE_CASE =float(a ) _SCREAMING_SNAKE_CASE =theta // (2 * pi) theta -= 2 * div * pi return sum( (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(a ) ) def _lowerCAmelCase(a : Dict , a : List[str] = 30 ) -> Union[str, Any]: if not isinstance(a , (int, float) ): raise ValueError('''maclaurin_cos() requires either an int or float for theta''' ) if not isinstance(a , a ) or accuracy <= 0: raise ValueError('''maclaurin_cos() requires a positive int for accuracy''' ) _SCREAMING_SNAKE_CASE =float(a ) _SCREAMING_SNAKE_CASE =theta // (2 * pi) theta -= 2 * div * pi return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(a ) ) if __name__ == "__main__": import doctest doctest.testmod() print(maclaurin_sin(1_0)) print(maclaurin_sin(-1_0)) print(maclaurin_sin(1_0, 1_5)) print(maclaurin_sin(-1_0, 1_5)) print(maclaurin_cos(5)) print(maclaurin_cos(-5)) print(maclaurin_cos(1_0, 1_5)) print(maclaurin_cos(-1_0, 1_5))
from __future__ import annotations


def is_palindrome(n: int | str) -> bool:
    """Return True if n reads the same forwards and backwards."""
    n = str(n)
    return n == n[::-1]


def solution(limit: int = 1_000_000) -> int:
    """Project Euler 36: sum the numbers below `limit` that are palindromic in base 10 and base 2."""
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
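Worked example (mine): 585 = 0b1001001001 is palindromic in both bases, so it is one of the numbers the loop counts.

assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])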
"""simple docstring""" import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def lowerCamelCase__ ( _lowerCamelCase : List[str] ) -> Any: # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) # or (cp >= 0x20000 and cp <= 0x2A6DF) # or (cp >= 0x2A700 and cp <= 0x2B73F) # or (cp >= 0x2B740 and cp <= 0x2B81F) # or (cp >= 0x2B820 and cp <= 0x2CEAF) # or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) # ): # return True return False def lowerCamelCase__ ( _lowerCamelCase : List[str] ) -> List[str]: # word like '180' or '身高' or '神' for char in word: lowerCamelCase_ = ord(_lowerCamelCase ) if not _is_chinese_char(_lowerCamelCase ): return 0 return 1 def lowerCamelCase__ ( _lowerCamelCase : int ) -> Dict: lowerCamelCase_ = set() for token in tokens: lowerCamelCase_ = len(_lowerCamelCase ) > 1 and is_chinese(_lowerCamelCase ) if chinese_word: word_set.add(_lowerCamelCase ) lowerCamelCase_ = list(_lowerCamelCase ) return word_list def lowerCamelCase__ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[Any] ) -> Any: if not chinese_word_set: return bert_tokens lowerCamelCase_ = max([len(_lowerCamelCase ) for w in chinese_word_set] ) lowerCamelCase_ = bert_tokens lowerCamelCase_ = 0, len(_lowerCamelCase ) while start < end: lowerCamelCase_ = True if is_chinese(bert_word[start] ): lowerCamelCase_ = min(end - start , _lowerCamelCase ) for i in range(_lowerCamelCase , 1 , -1 ): lowerCamelCase_ = """""".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): lowerCamelCase_ = """##""" + bert_word[j] lowerCamelCase_ = start + i lowerCamelCase_ = False break if single_word: start += 1 return bert_word def lowerCamelCase__ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any] ) -> List[str]: lowerCamelCase_ = [] for i in range(0 , len(_lowerCamelCase ) , 100 ): lowerCamelCase_ = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=['cws'] ).cws lowerCamelCase_ = [get_chinese_word(_lowerCamelCase ) for r in res] ltp_res.extend(_lowerCamelCase ) assert len(_lowerCamelCase ) == len(_lowerCamelCase ) lowerCamelCase_ = [] for i in range(0 , len(_lowerCamelCase ) , 100 ): lowerCamelCase_ = bert_tokenizer(lines[i : i + 100] , add_special_tokens=_lowerCamelCase , truncation=_lowerCamelCase , max_length=512 ) bert_res.extend(res['input_ids'] ) assert len(_lowerCamelCase ) == len(_lowerCamelCase ) lowerCamelCase_ = [] for input_ids, chinese_word in zip(_lowerCamelCase , _lowerCamelCase ): lowerCamelCase_ = [] for id in input_ids: lowerCamelCase_ = bert_tokenizer._convert_id_to_token(_lowerCamelCase ) input_tokens.append(_lowerCamelCase ) lowerCamelCase_ = add_sub_symbol(_lowerCamelCase , _lowerCamelCase ) lowerCamelCase_ = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(_lowerCamelCase ): if token[:2] == "##": lowerCamelCase_ = token[2:] # save chinese tokens' pos if len(_lowerCamelCase ) == 1 and _is_chinese_char(ord(_lowerCamelCase ) ): ref_id.append(_lowerCamelCase ) ref_ids.append(_lowerCamelCase ) assert len(_lowerCamelCase ) == len(_lowerCamelCase ) return ref_ids def lowerCamelCase__ ( _lowerCamelCase : int ) -> List[Any]: # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , 'r' , encoding='utf-8' ) as f: lowerCamelCase_ = f.readlines() lowerCamelCase_ = [line.strip() for line in data if len(_lowerCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' lowerCamelCase_ = LTP(args.ltp ) # faster in GPU device lowerCamelCase_ = BertTokenizer.from_pretrained(args.bert ) lowerCamelCase_ = prepare_ref(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) with open(args.save_path , 'w' , encoding='utf-8' ) as f: lowerCamelCase_ = [json.dumps(_lowerCamelCase ) + """\n""" for ref in ref_ids] f.writelines(_lowerCamelCase ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser(description='''prepare_chinese_ref''') parser.add_argument( '''--file_name''', required=False, type=str, default='''./resources/chinese-demo.txt''', help='''file need process, same as training data in lm''', ) parser.add_argument( '''--ltp''', required=False, type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path''', ) parser.add_argument( '''--bert''', required=False, type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''', ) parser.add_argument( '''--save_path''', required=False, type=str, default='''./resources/ref.txt''', help='''path to save res''', ) _SCREAMING_SNAKE_CASE : int = parser.parse_args() main(args)
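The function names above are mangled, so here is a self-contained sketch (my own) of the core CJK-block test that decides which characters qualify for whole-word masking; only the two most common ranges from the full check are shown.

def is_cjk_char(char: str) -> bool:
    cp = ord(char)
    # CJK Unified Ideographs plus Extension A (a subset of the ranges listed above)
    return 0x4E00 <= cp <= 0x9FFF or 0x3400 <= cp <= 0x4DBF


print([c for c in "身高180cm" if is_cjk_char(c)])  # ['身', '高']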
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def _a ( lowerCamelCase, lowerCamelCase=False ): lowerCamelCase : Dict = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ("""module.cls_token""", """vit.embeddings.cls_token"""), ("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""module.pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""module.norm.weight""", """layernorm.weight"""), ("""module.norm.bias""", """layernorm.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" lowerCamelCase : Any = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase=False ): for i in range(config.num_hidden_layers ): if base_model: lowerCamelCase : Optional[Any] = """""" else: lowerCamelCase : Optional[int] = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCamelCase : Dict = state_dict.pop(F'''module.blocks.{i}.attn.qkv.weight''' ) lowerCamelCase : List[str] = state_dict.pop(F'''module.blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict lowerCamelCase : Union[str, Any] = in_proj_weight[ : config.hidden_size, : ] lowerCamelCase : Optional[int] = in_proj_bias[: config.hidden_size] lowerCamelCase : Optional[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCamelCase : List[str] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] 
lowerCamelCase : Union[str, Any] = in_proj_weight[ -config.hidden_size :, : ] lowerCamelCase : Any = in_proj_bias[-config.hidden_size :] def _a ( lowerCamelCase ): lowerCamelCase : Tuple = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(lowerCamelCase, lowerCamelCase ) def _a ( lowerCamelCase ): # projection head is used in the self-supervised pre-training in MSN, # for downstream task it's not needed. lowerCamelCase : Any = [ """module.fc.fc1.weight""", """module.fc.fc1.bias""", """module.fc.bn1.weight""", """module.fc.bn1.bias""", """module.fc.bn1.running_mean""", """module.fc.bn1.running_var""", """module.fc.bn1.num_batches_tracked""", """module.fc.fc2.weight""", """module.fc.fc2.bias""", """module.fc.bn2.weight""", """module.fc.bn2.bias""", """module.fc.bn2.running_mean""", """module.fc.bn2.running_var""", """module.fc.bn2.num_batches_tracked""", """module.fc.fc3.weight""", """module.fc.fc3.bias""", ] for k in ignore_keys: state_dict.pop(lowerCamelCase, lowerCamelCase ) def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ): lowerCamelCase : Dict = dct.pop(lowerCamelCase ) lowerCamelCase : str = val def _a ( lowerCamelCase, lowerCamelCase ): lowerCamelCase : Any = ViTMSNConfig() lowerCamelCase : Tuple = 1000 lowerCamelCase : List[Any] = """datasets/huggingface/label-files""" lowerCamelCase : Optional[Any] = """imagenet-1k-id2label.json""" lowerCamelCase : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase, lowerCamelCase ), """r""" ) ) lowerCamelCase : List[Any] = {int(lowerCamelCase ): v for k, v in idalabel.items()} lowerCamelCase : Optional[int] = idalabel lowerCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: lowerCamelCase : int = 384 lowerCamelCase : Optional[int] = 1536 lowerCamelCase : Tuple = 6 elif "l16" in checkpoint_url: lowerCamelCase : Dict = 1024 lowerCamelCase : List[Any] = 4096 lowerCamelCase : Optional[int] = 24 lowerCamelCase : str = 16 lowerCamelCase : str = 0.1 elif "b4" in checkpoint_url: lowerCamelCase : Union[str, Any] = 4 elif "l7" in checkpoint_url: lowerCamelCase : Tuple = 7 lowerCamelCase : Optional[int] = 1024 lowerCamelCase : List[Any] = 4096 lowerCamelCase : Tuple = 24 lowerCamelCase : Dict = 16 lowerCamelCase : str = 0.1 lowerCamelCase : List[Any] = ViTMSNModel(lowerCamelCase ) lowerCamelCase : Dict = torch.hub.load_state_dict_from_url(lowerCamelCase, map_location="""cpu""" )["""target_encoder"""] lowerCamelCase : Any = ViTImageProcessor(size=config.image_size ) remove_projection_head(lowerCamelCase ) lowerCamelCase : Dict = create_rename_keys(lowerCamelCase, base_model=lowerCamelCase ) for src, dest in rename_keys: rename_key(lowerCamelCase, lowerCamelCase, lowerCamelCase ) read_in_q_k_v(lowerCamelCase, lowerCamelCase, base_model=lowerCamelCase ) model.load_state_dict(lowerCamelCase ) model.eval() lowerCamelCase : Tuple = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase : Dict = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase ).raw ) lowerCamelCase : Union[str, Any] = ViTImageProcessor( size=config.image_size, image_mean=lowerCamelCase, image_std=lowerCamelCase ) lowerCamelCase : Tuple = image_processor(images=lowerCamelCase, return_tensors="""pt""" ) # forward pass torch.manual_seed(2 ) lowerCamelCase : int = model(**lowerCamelCase ) lowerCamelCase : Union[str, Any] = outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # 
https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: lowerCamelCase : Union[str, Any] = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] ) elif "b16" in checkpoint_url: lowerCamelCase : Tuple = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] ) elif "l16" in checkpoint_url: lowerCamelCase : List[str] = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] ) elif "b4" in checkpoint_url: lowerCamelCase : Tuple = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] ) else: lowerCamelCase : List[str] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] ) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3], lowerCamelCase, atol=1e-4 ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCamelCase ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(lowerCamelCase ) if __name__ == "__main__": _lowerCamelCase =argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar""", type=str, help="""URL of the checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) _lowerCamelCase =parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
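A hedged invocation sketch: the script filename below is an assumption, but the URL is the argparser default shown above.

# python convert_vit_msn_to_pytorch.py \
#     --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#     --pytorch_dump_folder_path ./vit-msn-small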
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __snake_case (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): __a = StableDiffusionInpaintPipeline __a = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS __a = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS __a = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess __a = frozenset([] ) def __a ( self: Tuple ): torch.manual_seed(0 ) __lowerCamelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=A_ , ) __lowerCamelCase = PNDMScheduler(skip_prk_steps=A_ ) torch.manual_seed(0 ) __lowerCamelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , ) torch.manual_seed(0 ) __lowerCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , ) __lowerCamelCase = CLIPTextModel(A_ ) __lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) __lowerCamelCase = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def __a ( self: List[Any] , A_: Optional[Any] , A_: List[Any]=0 ): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched __lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A_ ) ).to(A_ ) __lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] __lowerCamelCase = Image.fromarray(np.uinta(A_ ) ).convert("""RGB""" ).resize((64, 64) ) __lowerCamelCase = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((64, 64) ) if str(A_ ).startswith("""mps""" ): __lowerCamelCase = torch.manual_seed(A_ ) else: __lowerCamelCase = torch.Generator(device=A_ ).manual_seed(A_ ) __lowerCamelCase = { """prompt""": """A painting of a squirrel eating a burger""", """image""": init_image, """mask_image""": mask_image, """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def __a ( self: Dict ): __lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator __lowerCamelCase = 
self.get_dummy_components() __lowerCamelCase = StableDiffusionInpaintPipeline(**A_ ) __lowerCamelCase = sd_pipe.to(A_ ) sd_pipe.set_progress_bar_config(disable=A_ ) __lowerCamelCase = self.get_dummy_inputs(A_ ) __lowerCamelCase = sd_pipe(**A_ ).images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __lowerCamelCase = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __a ( self: List[str] ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class __snake_case (unittest.TestCase ): def __a ( self: List[Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self: int ): __lowerCamelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) __lowerCamelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) __lowerCamelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint""" """/yellow_cat_sitting_on_a_park_bench.npy""" ) __lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting""" __lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(A_ , safety_checker=A_ ) pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) pipe.enable_attention_slicing() __lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench""" __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = pipe( prompt=A_ , image=A_ , mask_image=A_ , generator=A_ , output_type="""np""" , ) __lowerCamelCase = output.images[0] assert image.shape == (5_12, 5_12, 3) assert np.abs(expected_image - image ).max() < 9E-3 def __a ( self: Tuple ): __lowerCamelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) __lowerCamelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) __lowerCamelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint""" """/yellow_cat_sitting_on_a_park_bench_fp16.npy""" ) __lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting""" __lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained( A_ , torch_dtype=torch.floataa , safety_checker=A_ , ) pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) pipe.enable_attention_slicing() __lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench""" __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = pipe( prompt=A_ , image=A_ , mask_image=A_ , generator=A_ , output_type="""np""" , ) __lowerCamelCase = output.images[0] assert image.shape == (5_12, 5_12, 3) assert np.abs(expected_image - image ).max() < 5E-1 def __a ( self: Dict ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __lowerCamelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) __lowerCamelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) __lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting""" __lowerCamelCase = 
PNDMScheduler.from_pretrained(A_ , subfolder="""scheduler""" ) __lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained( A_ , safety_checker=A_ , scheduler=A_ , torch_dtype=torch.floataa , ) pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() __lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench""" __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = pipe( prompt=A_ , image=A_ , mask_image=A_ , generator=A_ , num_inference_steps=2 , output_type="""np""" , ) __lowerCamelCase = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 10**9
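A hedged usage sketch of the pipeline these tests exercise (requires a GPU; the checkpoint and image URLs are taken from the tests themselves).

import torch
from diffusers import StableDiffusionInpaintPipeline
from diffusers.utils import load_image

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/init_image.png"
)
mask_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
)
pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
).to("cuda")
image = pipe(
    prompt="Face of a yellow cat, high resolution, sitting on a park bench",
    image=init_image,
    mask_image=mask_image,
).images[0]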
def is_palindrome_number(num: int) -> bool:
    """Return True if the non-negative integer reads the same with its digits reversed."""
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
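Example values (mine): 121 survives the digit reversal, 123 does not, and negatives are rejected up front.

assert is_palindrome_number(121) is True
assert is_palindrome_number(123) is False
assert is_palindrome_number(-121) is False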
"""simple docstring""" import argparse import os import re lowercase__ : int = '''src/transformers''' # Pattern that looks at the indentation in a line. lowercase__ : List[Any] = re.compile(r'''^(\s*)\S''') # Pattern that matches `"key":" and puts `key` in group 0. lowercase__ : str = re.compile(r'''^\s*\"([^\"]+)\":''') # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. lowercase__ : int = re.compile(r'''^\s*_import_structure\[\"([^\"]+)\"\]''') # Pattern that matches `"key",` and puts `key` in group 0. lowercase__ : Dict = re.compile(r'''^\s*\"([^\"]+)\",\s*$''') # Pattern that matches any `[stuff]` and puts `stuff` in group 0. lowercase__ : str = re.compile(r'''\[([^\]]+)\]''') def __lowercase ( _a ): snake_case_ : List[Any] = _re_indent.search(_a ) return "" if search is None else search.groups()[0] def __lowercase ( _a , _a="" , _a=None , _a=None ): snake_case_ : Union[str, Any] = 0 snake_case_ : str = code.split('''\n''' ) if start_prompt is not None: while not lines[index].startswith(_a ): index += 1 snake_case_ : Dict = ["""\n""".join(lines[:index] )] else: snake_case_ : int = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). snake_case_ : Tuple = [lines[index]] index += 1 while index < len(_a ) and (end_prompt is None or not lines[index].startswith(_a )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(_a ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ): current_block.append(lines[index] ) blocks.append('''\n'''.join(_a ) ) if index < len(_a ) - 1: snake_case_ : str = [lines[index + 1]] index += 1 else: snake_case_ : Union[str, Any] = [] else: blocks.append('''\n'''.join(_a ) ) snake_case_ : Any = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(_a ) > 0: blocks.append('''\n'''.join(_a ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(_a ): blocks.append('''\n'''.join(lines[index:] ) ) return blocks def __lowercase ( _a ): def _inner(_a ): return key(_a ).lower().replace('''_''' , '''''' ) return _inner def __lowercase ( _a , _a=None ): # If no key is provided, we use a noop. def noop(_a ): return x if key is None: snake_case_ : List[Any] = noop # Constants are all uppercase, they go first. snake_case_ : Tuple = [obj for obj in objects if key(_a ).isupper()] # Classes are not all uppercase but start with a capital, they go second. snake_case_ : int = [obj for obj in objects if key(_a )[0].isupper() and not key(_a ).isupper()] # Functions begin with a lowercase, they go last. snake_case_ : Tuple = [obj for obj in objects if not key(_a )[0].isupper()] snake_case_ : str = ignore_underscore(_a ) return sorted(_a , key=_a ) + sorted(_a , key=_a ) + sorted(_a , key=_a ) def __lowercase ( _a ): # This inner function sort imports between [ ]. def _replace(_a ): snake_case_ : Optional[int] = match.groups()[0] if "," not in imports: return f"[{imports}]" snake_case_ : Optional[Any] = [part.strip().replace('''\"''' , '''''' ) for part in imports.split(''',''' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: snake_case_ : List[str] = keys[:-1] return "[" + ", ".join([f"\"{k}\"" for k in sort_objects(_a )] ) + "]" snake_case_ : Optional[Any] = import_statement.split('''\n''' ) if len(_a ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... 
# ] # We may have to ignore one or two lines on each side. snake_case_ : List[Any] = 2 if lines[1].strip() == """[""" else 1 snake_case_ : List[Any] = [(i, _re_strip_line.search(_a ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] snake_case_ : Any = sort_objects(_a , key=lambda _a : x[1] ) snake_case_ : str = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(_a ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: snake_case_ : int = _re_bracket_content.sub(_replace , lines[1] ) else: snake_case_ : Optional[Any] = [part.strip().replace('''\"''' , '''''' ) for part in lines[1].split(''',''' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: snake_case_ : List[Any] = keys[:-1] snake_case_ : List[Any] = get_indent(lines[1] ) + """, """.join([f"\"{k}\"" for k in sort_objects(_a )] ) return "\n".join(_a ) else: # Finally we have to deal with imports fitting on one line snake_case_ : str = _re_bracket_content.sub(_replace , _a ) return import_statement def __lowercase ( _a , _a=True ): with open(_a , encoding='''utf-8''' ) as f: snake_case_ : Optional[Any] = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 snake_case_ : Tuple = split_code_in_indented_blocks( _a , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' ) # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(_a ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. snake_case_ : Tuple = main_blocks[block_idx] snake_case_ : Optional[int] = block.split('''\n''' ) # Get to the start of the imports. snake_case_ : List[str] = 0 while line_idx < len(_a ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: snake_case_ : Optional[int] = len(_a ) else: line_idx += 1 if line_idx >= len(_a ): continue # Ignore beginning and last line: they don't contain anything. snake_case_ : List[str] = """\n""".join(block_lines[line_idx:-1] ) snake_case_ : List[Any] = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. snake_case_ : Union[str, Any] = split_code_in_indented_blocks(_a , indent_level=_a ) # We have two categories of import key: list or _import_structure[key].append/extend snake_case_ : str = _re_direct_key if """_import_structure = {""" in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. snake_case_ : List[str] = [(pattern.search(_a ).groups()[0] if pattern.search(_a ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. snake_case_ : List[str] = [(i, key) for i, key in enumerate(_a ) if key is not None] snake_case_ : Optional[int] = [x[0] for x in sorted(_a , key=lambda _a : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. snake_case_ : List[str] = 0 snake_case_ : List[str] = [] for i in range(len(_a ) ): if keys[i] is None: reorderded_blocks.append(internal_blocks[i] ) else: snake_case_ : Optional[int] = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reorderded_blocks.append(_a ) count += 1 # And we put our main block back together with its first and last line. 
snake_case_ : List[str] = """\n""".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] ) if code != "\n".join(_a ): if check_only: return True else: print(f"Overwriting {file}." ) with open(_a , '''w''' , encoding='''utf-8''' ) as f: f.write('''\n'''.join(_a ) ) def __lowercase ( _a=True ): snake_case_ : List[Any] = [] for root, _, files in os.walk(_a ): if "__init__.py" in files: snake_case_ : List[str] = sort_imports(os.path.join(_a , '''__init__.py''' ) , check_only=_a ) if result: snake_case_ : Dict = [os.path.join(_a , '''__init__.py''' )] if len(_a ) > 0: raise ValueError(f"Would overwrite {len(_a )} files, run `make style`." ) if __name__ == "__main__": lowercase__ : Any = argparse.ArgumentParser() parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''') lowercase__ : Dict = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
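A tiny illustration (my own example) of the three-bucket ordering `sort_objects` applies once the mangled def names are restored: all-uppercase constants first, then capitalized classes, then everything else, each bucket sorted case- and underscore-insensitively (the call sites above still use the real name `sort_objects`).

print(sort_objects(["load_model", "Config", "VERSION", "_helper", "Trainer"]))
# ['VERSION', 'Config', 'Trainer', '_helper', 'load_model']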
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable

_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
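A hedged usage note: with the lazy structure above, importing the package stays cheap because torch-backed modules are only imported when one of their symbols is actually resolved through `_LazyModule`.

from transformers import GPTNeoXJapaneseConfig  # resolved lazily on first access

config = GPTNeoXJapaneseConfig()
print(config)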
'''simple docstring''' import collections import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase : Optional[int] = logging.get_logger(__name__) lowerCAmelCase : Dict = """▁""" lowerCAmelCase : Any = {"""vocab_file""": """prophetnet.tokenizer"""} lowerCAmelCase : List[str] = { """vocab_file""": { """microsoft/xprophetnet-large-wiki100-cased""": ( """https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer""" ), } } lowerCAmelCase : List[Any] = { """microsoft/xprophetnet-large-wiki100-cased""": {"""do_lower_case""": False}, } lowerCAmelCase : Dict = { """microsoft/xprophetnet-large-wiki100-cased""": 5_1_2, } def _A ( A ) -> str: lowercase : Optional[int] = collections.OrderedDict() with open(A ,"r" ,encoding="utf-8" ) as reader: lowercase : str = reader.readlines() for index, token in enumerate(A ): lowercase : Optional[int] = token.rstrip("\n" ) lowercase : Tuple = index return vocab class _UpperCamelCase ( __SCREAMING_SNAKE_CASE): '''simple docstring''' _snake_case = VOCAB_FILES_NAMES _snake_case = PRETRAINED_VOCAB_FILES_MAP _snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case = ["""input_ids""", """attention_mask"""] def __init__( self , a_ , a_="[SEP]" , a_="[SEP]" , a_="[SEP]" , a_="[UNK]" , a_="[PAD]" , a_="[CLS]" , a_="[MASK]" , a_ = None , **a_ , ) -> Union[str, Any]: lowercase : int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=a_ , eos_token=a_ , sep_token=a_ , unk_token=a_ , pad_token=a_ , cls_token=a_ , mask_token=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , ) try: import sentencepiece as spm except ImportError: logger.warning( "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece" " pip install sentencepiece" ) raise lowercase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(a_ ) ) lowercase : int = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # put special tokens and [unused] tokens into the vocab lowercase : List[str] = {"""[PAD]""": 0, """[CLS]""": 1, """[SEP]""": 2, """[UNK]""": 3, """[MASK]""": 4} for i in range(1_0 ): lowercase : List[str] = F'''[unused{i}]''' lowercase : int = 5 + i # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab lowercase : Dict = 1_2 lowercase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} for k in self.fairseq_tokens_to_ids.keys(): self.unique_no_split_tokens.append(a_ ) def __getstate__( self ) -> Dict: lowercase : str = self.__dict__.copy() lowercase : Any = None return state def __setstate__( self , a_ ) -> Dict: lowercase : List[Any] = d try: import sentencepiece as spm except ImportError: logger.warning( "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece" " pip install sentencepiece" ) raise # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): lowercase : int = {} lowercase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def a__ ( self , a_ , a_ = None , a_ = False ) -> Union[str, Any]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ ) if token_ids_a is None: return ([0] * len(a_ )) + [1] return ([0] * len(a_ )) + [1] + ([0] * len(a_ )) + [1] def a__ ( self , a_ , a_ = None ) -> Dict: lowercase : List[Any] = [self.sep_token_id] if token_ids_a is None: return len(token_ids_a + sep ) * [0] return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def a__ ( self ) -> Tuple: return len(self.sp_model ) + self.fairseq_offset def a__ ( self ) -> Optional[int]: lowercase : Union[str, Any] = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def a__ ( self , a_ ) -> Union[str, Any]: return self.sp_model.encode(a_ , out_type=a_ ) def a__ ( self , a_ ) -> List[Any]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowercase : Optional[int] = self.sp_model.PieceToId(a_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def a__ ( self , a_ ) -> Optional[int]: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def a__ ( self , a_ ) -> Optional[int]: lowercase : int = """""".join(a_ ).replace(a_ , " " ).strip() return out_string def a__ ( self , a_ , a_ = None ) -> Tuple: if not os.path.isdir(a_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowercase : List[Any] = os.path.join( a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , a_ ) elif not os.path.isfile(self.vocab_file ): with open(a_ , "wb" ) as fi: lowercase : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(a_ ) return (out_vocab_file,) def a__ ( self , a_ , a_ = None ) -> Union[str, Any]: if token_ids_a is None: return token_ids_a + [self.sep_token_id] lowercase : List[str] = [self.sep_token_id] return token_ids_a + sep + token_ids_a + sep
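# A hedged usage sketch for the tokenizer above (its public name in transformers
# is XLMProphetNetTokenizer; it requires `pip install sentencepiece`, and the
# resulting ids are SentencePiece ids shifted by the fairseq offset of 12):
#
#   from transformers import XLMProphetNetTokenizer
#
#   tokenizer = XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
#   ids = tokenizer("Hello world").input_ids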
import copy
import random

from transformers import CLIPTokenizer


class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
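# A usage sketch under stated assumptions (the checkpoint id is illustrative):
#
#   tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
#   tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#   # "<cat-toy>" now expands to "<cat-toy>_0 <cat-toy>_1 <cat-toy>_2 <cat-toy>_3"
#   ids = tokenizer("a photo of <cat-toy>").input_ids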
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertConfig, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertWithLMHeadModel, ) class _UpperCamelCase : '''simple docstring''' def __init__( self : List[str] , a : Optional[int] , ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Dict = parent SCREAMING_SNAKE_CASE : List[Any] = 13 SCREAMING_SNAKE_CASE : List[Any] = 7 SCREAMING_SNAKE_CASE : List[str] = True SCREAMING_SNAKE_CASE : Any = True SCREAMING_SNAKE_CASE : Optional[Any] = True SCREAMING_SNAKE_CASE : Union[str, Any] = True SCREAMING_SNAKE_CASE : List[str] = True SCREAMING_SNAKE_CASE : Union[str, Any] = False SCREAMING_SNAKE_CASE : List[str] = False SCREAMING_SNAKE_CASE : int = False SCREAMING_SNAKE_CASE : int = 2 SCREAMING_SNAKE_CASE : Union[str, Any] = 99 SCREAMING_SNAKE_CASE : Optional[Any] = 0 SCREAMING_SNAKE_CASE : Any = 32 SCREAMING_SNAKE_CASE : Tuple = 2 SCREAMING_SNAKE_CASE : int = 4 SCREAMING_SNAKE_CASE : Union[str, Any] = 0.1 SCREAMING_SNAKE_CASE : str = 0.1 SCREAMING_SNAKE_CASE : List[str] = 512 SCREAMING_SNAKE_CASE : Any = 16 SCREAMING_SNAKE_CASE : str = 2 SCREAMING_SNAKE_CASE : Union[str, Any] = 0.02 SCREAMING_SNAKE_CASE : Any = 3 SCREAMING_SNAKE_CASE : Optional[int] = 4 SCREAMING_SNAKE_CASE : Optional[int] = """last""" SCREAMING_SNAKE_CASE : int = True SCREAMING_SNAKE_CASE : Optional[Any] = None SCREAMING_SNAKE_CASE : Optional[int] = 0 def __UpperCamelCase ( self : str ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa ) SCREAMING_SNAKE_CASE : int = None if self.use_input_lengths: SCREAMING_SNAKE_CASE : str = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length SCREAMING_SNAKE_CASE : List[str] = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) SCREAMING_SNAKE_CASE : List[Any] = None SCREAMING_SNAKE_CASE : Tuple = None SCREAMING_SNAKE_CASE : List[Any] = None if self.use_labels: SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa ) SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size] , self.num_choices ) SCREAMING_SNAKE_CASE : Optional[Any] = FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , 
n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , ) return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def __UpperCamelCase ( self : Optional[Any] , a : Tuple , a : str , a : Any , a : Optional[Any] , a : Tuple , a : str , a : int , a : List[Any] , a : Dict , ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = TFFlaubertModel(config=a ) SCREAMING_SNAKE_CASE : Tuple = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids} SCREAMING_SNAKE_CASE : List[str] = model(a ) SCREAMING_SNAKE_CASE : List[Any] = [input_ids, input_mask] SCREAMING_SNAKE_CASE : Dict = model(a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCamelCase ( self : Optional[int] , a : Tuple , a : Tuple , a : Optional[Any] , a : Dict , a : Optional[Any] , a : int , a : Dict , a : Optional[int] , a : Tuple , ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE : int = TFFlaubertWithLMHeadModel(a ) SCREAMING_SNAKE_CASE : str = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids} SCREAMING_SNAKE_CASE : Optional[Any] = model(a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCamelCase ( self : Union[str, Any] , a : Tuple , a : List[Any] , a : Any , a : List[str] , a : Optional[Any] , a : Optional[Any] , a : Union[str, Any] , a : Tuple , a : Union[str, Any] , ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = TFFlaubertForQuestionAnsweringSimple(a ) SCREAMING_SNAKE_CASE : Tuple = {"""input_ids""": input_ids, """lengths""": input_lengths} SCREAMING_SNAKE_CASE : Optional[Any] = model(a ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCamelCase ( self : int , a : Union[str, Any] , a : Optional[int] , a : str , a : str , a : Optional[Any] , a : Any , a : str , a : Optional[Any] , a : int , ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : int = TFFlaubertForSequenceClassification(a ) SCREAMING_SNAKE_CASE : List[str] = {"""input_ids""": input_ids, """lengths""": input_lengths} SCREAMING_SNAKE_CASE : Optional[Any] = model(a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __UpperCamelCase ( self : str , a : int , a : Optional[Any] , a : Optional[Any] , a : Optional[Any] , a : str , a : List[Any] , a : List[Any] , a : Tuple , a : Union[str, Any] , ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels SCREAMING_SNAKE_CASE : int = TFFlaubertForTokenClassification(config=a ) SCREAMING_SNAKE_CASE : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} SCREAMING_SNAKE_CASE : List[str] = model(a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __UpperCamelCase ( self : str , a : Any , a : List[str] , a : Any , a : str , a : List[str] , a : Any , a : Dict , a : Optional[int] , a : Tuple , ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = self.num_choices 
SCREAMING_SNAKE_CASE : List[Any] = TFFlaubertForMultipleChoice(config=a ) SCREAMING_SNAKE_CASE : List[str] = tf.tile(tf.expand_dims(a , 1 ) , (1, self.num_choices, 1) ) SCREAMING_SNAKE_CASE : List[Any] = tf.tile(tf.expand_dims(a , 1 ) , (1, self.num_choices, 1) ) SCREAMING_SNAKE_CASE : List[str] = tf.tile(tf.expand_dims(a , 1 ) , (1, self.num_choices, 1) ) SCREAMING_SNAKE_CASE : Tuple = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } SCREAMING_SNAKE_CASE : Optional[Any] = model(a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs() ( SCREAMING_SNAKE_CASE ) : Optional[Any] = config_and_inputs SCREAMING_SNAKE_CASE : int = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """langs""": token_type_ids, """lengths""": input_lengths, } return config, inputs_dict @require_tf class _UpperCamelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =( ( TFFlaubertModel, TFFlaubertWithLMHeadModel, TFFlaubertForSequenceClassification, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForTokenClassification, TFFlaubertForMultipleChoice, ) if is_tf_available() else () ) lowerCamelCase__ =( (TFFlaubertWithLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable lowerCamelCase__ =( { """feature-extraction""": TFFlaubertModel, """fill-mask""": TFFlaubertWithLMHeadModel, """question-answering""": TFFlaubertForQuestionAnsweringSimple, """text-classification""": TFFlaubertForSequenceClassification, """token-classification""": TFFlaubertForTokenClassification, """zero-shot""": TFFlaubertForSequenceClassification, } if is_tf_available() else {} ) lowerCamelCase__ =False lowerCamelCase__ =False def __UpperCamelCase ( self : List[Any] , a : str , a : Optional[Any] , a : List[Any] , a : Dict , a : str ) -> Dict: """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def __UpperCamelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = TFFlaubertModelTester(self ) SCREAMING_SNAKE_CASE : str = ConfigTester(self , config_class=a , emb_dim=37 ) def __UpperCamelCase ( self : List[str] ) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() def __UpperCamelCase ( self : List[Any] ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*a ) def __UpperCamelCase ( self : Optional[int] ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*a ) def __UpperCamelCase ( self : List[str] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*a ) def __UpperCamelCase ( self : Dict ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*a ) def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_token_classification(*a ) def __UpperCamelCase ( self : str ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_multiple_choice(*a ) @slow def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]: """simple docstring""" for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE : Dict = TFFlaubertModel.from_pretrained(a ) self.assertIsNotNone(a ) @require_tf @require_sentencepiece @require_tokenizers class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' @slow def __UpperCamelCase ( self : Any ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased" ) SCREAMING_SNAKE_CASE : Union[str, Any] = tf.convert_to_tensor( [[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !" SCREAMING_SNAKE_CASE : Any = model(a )[0] SCREAMING_SNAKE_CASE : Tuple = tf.TensorShape((1, 8, 512) ) self.assertEqual(output.shape , a ) # compare the actual values for a slice. SCREAMING_SNAKE_CASE : List[Any] = tf.convert_to_tensor( [ [ [-1.876_8773, -1.56_6555, 0.2707_2418], [-1.692_0038, -0.587_3505, 1.932_9599], [-2.956_3985, -1.699_3835, 1.797_2052], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class A__ ( unittest.TestCase): def __init__( self , __magic_name__ , __magic_name__=7 , __magic_name__=3 , __magic_name__=1_8 , __magic_name__=3_0 , __magic_name__=4_0_0 , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=[0.48_145_466, 0.4_578_275, 0.40_821_073] , __magic_name__=[0.26_862_954, 0.26_130_258, 0.27_577_711] , __magic_name__=True , ): lowerCamelCase : Union[str, Any] = size if size is not None else {"""height""": 2_2_4, """width""": 2_2_4} lowerCamelCase : str = crop_size if crop_size is not None else {"""height""": 1_8, """width""": 1_8} lowerCamelCase : Optional[int] = parent lowerCamelCase : Union[str, Any] = batch_size lowerCamelCase : str = num_channels lowerCamelCase : Any = image_size lowerCamelCase : Optional[int] = min_resolution lowerCamelCase : Union[str, Any] = max_resolution lowerCamelCase : Union[str, Any] = do_resize lowerCamelCase : int = size lowerCamelCase : int = do_center_crop lowerCamelCase : Union[str, Any] = crop_size lowerCamelCase : Union[str, Any] = do_normalize lowerCamelCase : Dict = image_mean lowerCamelCase : Optional[Any] = image_std lowerCamelCase : Union[str, Any] = do_convert_rgb def UpperCamelCase__ ( self ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def UpperCamelCase__ ( self , __magic_name__=False , __magic_name__=False , __magic_name__=False ): assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: lowerCamelCase : Tuple = [] for i in range(self.batch_size ): image_inputs.append( np.random.randint( 2_5_5 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) ) else: lowerCamelCase : Dict = [] for i in range(self.batch_size ): lowerCamelCase , lowerCamelCase : int = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 ) image_inputs.append(np.random.randint(2_5_5 , size=(self.num_channels, width, height) , dtype=np.uinta ) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension lowerCamelCase : int = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs] if torchify: lowerCamelCase : int = [torch.from_numpy(__magic_name__ ) for x in image_inputs] return image_inputs @require_torch @require_vision class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase): _UpperCAmelCase : Any = ChineseCLIPImageProcessor if is_vision_available() else None def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = ChineseCLIPImageProcessingTester(self , do_center_crop=__magic_name__ ) @property def UpperCamelCase__ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__magic_name__ , """do_resize""" ) ) self.assertTrue(hasattr(__magic_name__ , 
"""size""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_center_crop""" ) ) self.assertTrue(hasattr(__magic_name__ , """center_crop""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_normalize""" ) ) self.assertTrue(hasattr(__magic_name__ , """image_mean""" ) ) self.assertTrue(hasattr(__magic_name__ , """image_std""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_convert_rgb""" ) ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 2_2_4, """width""": 2_2_4} ) self.assertEqual(image_processor.crop_size , {"""height""": 1_8, """width""": 1_8} ) lowerCamelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2} ) self.assertEqual(image_processor.crop_size , {"""height""": 8_4, """width""": 8_4} ) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): # Initialize image_processing lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , Image.Image ) # Test not batched input lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCamelCase : Optional[Any] = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def UpperCamelCase__ ( self ): # Initialize image_processing lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ , numpify=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , np.ndarray ) # Test not batched input lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCamelCase : Tuple = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def UpperCamelCase__ ( self ): # Initialize image_processing lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase : Any = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ , torchify=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , torch.Tensor ) # Test not batched input lowerCamelCase : Optional[int] = 
image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCamelCase : str = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) @require_torch @require_vision class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase): _UpperCAmelCase : Tuple = ChineseCLIPImageProcessor if is_vision_available() else None def UpperCamelCase__ ( self ): lowerCamelCase : Union[str, Any] = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=__magic_name__ ) lowerCamelCase : Any = 3 @property def UpperCamelCase__ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase__ ( self ): lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__magic_name__ , """do_resize""" ) ) self.assertTrue(hasattr(__magic_name__ , """size""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_center_crop""" ) ) self.assertTrue(hasattr(__magic_name__ , """center_crop""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_normalize""" ) ) self.assertTrue(hasattr(__magic_name__ , """image_mean""" ) ) self.assertTrue(hasattr(__magic_name__ , """image_std""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_convert_rgb""" ) ) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): # Initialize image_processing lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , Image.Image ) # Test not batched input lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCamelCase : Optional[Any] = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
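# Illustrative note on the second test class above: it builds 4-channel
# (RGBA-like) inputs, but because `do_convert_rgb=True` the processor converts
# every image to RGB before the other transforms run, so the encoded batches
# are expected to come out with 3 channels (`expected_encoded_image_num_channels`).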
def longest_distance(graph):
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)
    # Compute the in-degree of every vertex.
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    # Start from the vertices with no incoming edge.
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    # Kahn's topological sort, relaxing the longest distance as we go.
    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)
    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
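# Hand-computed check for the graph above: one longest chain is
# 0 -> 2 -> 5 -> 6 -> 7, which visits five vertices, so the call prints 5.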
from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A__ : def __init__( self , __magic_name__ , __magic_name__=3 , __magic_name__=3_2 , __magic_name__=3 , __magic_name__=1_0 , __magic_name__=[1_0, 2_0, 3_0, 4_0] , __magic_name__=[1, 1, 2, 1] , __magic_name__=True , __magic_name__=True , __magic_name__="relu" , __magic_name__=3 , __magic_name__=None , ): lowerCamelCase : Tuple = parent lowerCamelCase : Tuple = batch_size lowerCamelCase : List[Any] = image_size lowerCamelCase : Optional[Any] = num_channels lowerCamelCase : Dict = embeddings_size lowerCamelCase : Optional[int] = hidden_sizes lowerCamelCase : Union[str, Any] = depths lowerCamelCase : Optional[Any] = is_training lowerCamelCase : Union[str, Any] = use_labels lowerCamelCase : Dict = hidden_act lowerCamelCase : Any = num_labels lowerCamelCase : int = scope lowerCamelCase : Optional[Any] = len(__magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase : Tuple = None if self.use_labels: lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels ) lowerCamelCase : Tuple = self.get_config() return config, pixel_values, labels def UpperCamelCase__ ( self ): return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : Dict = TFResNetModel(config=__magic_name__ ) lowerCamelCase : Tuple = model(__magic_name__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : str = self.num_labels lowerCamelCase : Dict = TFResNetForImageClassification(__magic_name__ ) lowerCamelCase : Union[str, Any] = model(__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = config_and_inputs lowerCamelCase : List[str] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase): _UpperCAmelCase : Any = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () _UpperCAmelCase : List[str] = ( {"""feature-extraction""": TFResNetModel, 
"""image-classification""": TFResNetForImageClassification} if is_tf_available() else {} ) _UpperCAmelCase : Optional[Any] = False _UpperCAmelCase : Optional[Any] = False _UpperCAmelCase : Dict = False _UpperCAmelCase : List[Any] = False _UpperCAmelCase : Any = False def UpperCamelCase__ ( self ): lowerCamelCase : int = TFResNetModelTester(self ) lowerCamelCase : str = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ ) def UpperCamelCase__ ( self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase__ ( self ): return @unittest.skip(reason="""ResNet does not use inputs_embeds""" ) def UpperCamelCase__ ( self ): pass @unittest.skip(reason="""ResNet does not support input and output embeddings""" ) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : List[str] = model_class(__magic_name__ ) lowerCamelCase : str = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase : Tuple = [*signature.parameters.keys()] lowerCamelCase : List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCamelCase__ ( self ): def check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : Any = model_class(__magic_name__ ) lowerCamelCase : List[Any] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) lowerCamelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCamelCase : Union[str, Any] = self.model_tester.num_stages self.assertEqual(len(__magic_name__ ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : Tuple = ["""basic""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: lowerCamelCase : Union[str, Any] = layer_type lowerCamelCase : str = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase : int = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__magic_name__ ) @slow def UpperCamelCase__ ( self ): for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : Any = TFResNetModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def _a ( ): lowerCamelCase : 
Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class A__ ( unittest.TestCase): @cached_property def UpperCamelCase__ ( self ): return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) lowerCamelCase : List[str] = self.default_image_processor lowerCamelCase : str = prepare_img() lowerCamelCase : Tuple = image_processor(images=__magic_name__ , return_tensors="""tf""" ) # forward pass lowerCamelCase : Tuple = model(**__magic_name__ ) # verify the logits lowerCamelCase : Optional[Any] = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , __magic_name__ ) lowerCamelCase : Optional[Any] = tf.constant([-11.1_069, -9.7_877, -8.3_777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __magic_name__ , atol=1e-4 ) )
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging A_ : int = logging.get_logger(__name__) A_ : Optional[Any] = { 'facebook/wav2vec2-base-960h': 'https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json', # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2 } class lowerCAmelCase__ ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' _SCREAMING_SNAKE_CASE : Any = """wav2vec2""" def __init__( self : Dict , _SCREAMING_SNAKE_CASE : List[Any]=32 , _SCREAMING_SNAKE_CASE : List[Any]=768 , _SCREAMING_SNAKE_CASE : Dict=12 , _SCREAMING_SNAKE_CASE : int=12 , _SCREAMING_SNAKE_CASE : List[str]=3_072 , _SCREAMING_SNAKE_CASE : str="gelu" , _SCREAMING_SNAKE_CASE : int=0.1 , _SCREAMING_SNAKE_CASE : Tuple=0.1 , _SCREAMING_SNAKE_CASE : int=0.1 , _SCREAMING_SNAKE_CASE : Optional[int]=0.0 , _SCREAMING_SNAKE_CASE : List[Any]=0.0 , _SCREAMING_SNAKE_CASE : Dict=0.1 , _SCREAMING_SNAKE_CASE : int=0.1 , _SCREAMING_SNAKE_CASE : Optional[Any]=0.0_2 , _SCREAMING_SNAKE_CASE : Union[str, Any]=1E-5 , _SCREAMING_SNAKE_CASE : Tuple="group" , _SCREAMING_SNAKE_CASE : Optional[int]="gelu" , _SCREAMING_SNAKE_CASE : str=(512, 512, 512, 512, 512, 512, 512) , _SCREAMING_SNAKE_CASE : Any=(5, 2, 2, 2, 2, 2, 2) , _SCREAMING_SNAKE_CASE : Optional[Any]=(10, 3, 3, 3, 3, 2, 2) , _SCREAMING_SNAKE_CASE : str=False , _SCREAMING_SNAKE_CASE : Dict=128 , _SCREAMING_SNAKE_CASE : Any=16 , _SCREAMING_SNAKE_CASE : str=False , _SCREAMING_SNAKE_CASE : int=True , _SCREAMING_SNAKE_CASE : List[str]=0.0_5 , _SCREAMING_SNAKE_CASE : Optional[Any]=10 , _SCREAMING_SNAKE_CASE : Union[str, Any]=2 , _SCREAMING_SNAKE_CASE : int=0.0 , _SCREAMING_SNAKE_CASE : List[str]=10 , _SCREAMING_SNAKE_CASE : List[str]=0 , _SCREAMING_SNAKE_CASE : str=320 , _SCREAMING_SNAKE_CASE : Optional[int]=2 , _SCREAMING_SNAKE_CASE : List[Any]=0.1 , _SCREAMING_SNAKE_CASE : Optional[int]=100 , _SCREAMING_SNAKE_CASE : Dict=256 , _SCREAMING_SNAKE_CASE : Dict=256 , _SCREAMING_SNAKE_CASE : List[Any]=0.1 , _SCREAMING_SNAKE_CASE : List[Any]="sum" , _SCREAMING_SNAKE_CASE : Dict=False , _SCREAMING_SNAKE_CASE : List[Any]=False , _SCREAMING_SNAKE_CASE : List[Any]=256 , _SCREAMING_SNAKE_CASE : Dict=(512, 512, 512, 512, 1_500) , _SCREAMING_SNAKE_CASE : Dict=(5, 3, 3, 1, 1) , _SCREAMING_SNAKE_CASE : Any=(1, 2, 3, 1, 1) , _SCREAMING_SNAKE_CASE : Optional[int]=512 , _SCREAMING_SNAKE_CASE : Union[str, Any]=0 , _SCREAMING_SNAKE_CASE : int=1 , _SCREAMING_SNAKE_CASE : int=2 , _SCREAMING_SNAKE_CASE : int=False , _SCREAMING_SNAKE_CASE : Tuple=3 , _SCREAMING_SNAKE_CASE : List[str]=2 , _SCREAMING_SNAKE_CASE : Optional[Any]=3 , _SCREAMING_SNAKE_CASE : List[Any]=None , _SCREAMING_SNAKE_CASE : Any=None , **_SCREAMING_SNAKE_CASE : List[Any] , ) -> Tuple: """simple docstring""" super().__init__(**_SCREAMING_SNAKE_CASE , pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size SCREAMING_SNAKE_CASE : int = feat_extract_norm SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract_activation SCREAMING_SNAKE_CASE : List[str] = list(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : Optional[int] = list(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : Union[str, Any] = list(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : Optional[Any] = conv_bias SCREAMING_SNAKE_CASE : Optional[int] = num_conv_pos_embeddings SCREAMING_SNAKE_CASE : Tuple = num_conv_pos_embedding_groups SCREAMING_SNAKE_CASE : str = 
len(self.conv_dim ) SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers SCREAMING_SNAKE_CASE : List[Any] = intermediate_size SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_act SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads SCREAMING_SNAKE_CASE : str = hidden_dropout SCREAMING_SNAKE_CASE : Tuple = attention_dropout SCREAMING_SNAKE_CASE : int = activation_dropout SCREAMING_SNAKE_CASE : Optional[Any] = feat_proj_dropout SCREAMING_SNAKE_CASE : Optional[Any] = final_dropout SCREAMING_SNAKE_CASE : Optional[int] = layerdrop SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range SCREAMING_SNAKE_CASE : List[str] = vocab_size SCREAMING_SNAKE_CASE : List[Any] = do_stable_layer_norm SCREAMING_SNAKE_CASE : List[str] = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==' ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =' f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,""" f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 SCREAMING_SNAKE_CASE : Dict = apply_spec_augment SCREAMING_SNAKE_CASE : Optional[int] = mask_time_prob SCREAMING_SNAKE_CASE : Dict = mask_time_length SCREAMING_SNAKE_CASE : Optional[int] = mask_time_min_masks SCREAMING_SNAKE_CASE : List[str] = mask_feature_prob SCREAMING_SNAKE_CASE : int = mask_feature_length SCREAMING_SNAKE_CASE : Dict = mask_feature_min_masks # parameters for pretraining with codevector quantized representations SCREAMING_SNAKE_CASE : int = num_codevectors_per_group SCREAMING_SNAKE_CASE : int = num_codevector_groups SCREAMING_SNAKE_CASE : int = contrastive_logits_temperature SCREAMING_SNAKE_CASE : List[str] = feat_quantizer_dropout SCREAMING_SNAKE_CASE : int = num_negatives SCREAMING_SNAKE_CASE : Dict = codevector_dim SCREAMING_SNAKE_CASE : Optional[Any] = proj_codevector_dim SCREAMING_SNAKE_CASE : Optional[Any] = diversity_loss_weight # ctc loss SCREAMING_SNAKE_CASE : int = ctc_loss_reduction SCREAMING_SNAKE_CASE : Optional[int] = ctc_zero_infinity # adapter SCREAMING_SNAKE_CASE : Any = add_adapter SCREAMING_SNAKE_CASE : str = adapter_kernel_size SCREAMING_SNAKE_CASE : Any = adapter_stride SCREAMING_SNAKE_CASE : Union[str, Any] = num_adapter_layers SCREAMING_SNAKE_CASE : Optional[int] = output_hidden_size or hidden_size SCREAMING_SNAKE_CASE : Optional[int] = adapter_attn_dim # SequenceClassification-specific parameter. Feel free to ignore for other classes. SCREAMING_SNAKE_CASE : Union[str, Any] = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. SCREAMING_SNAKE_CASE : int = list(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : Any = list(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : Tuple = list(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : int = xvector_output_dim @property def _lowerCAmelCase ( self : List[str] ) -> Optional[int]: """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
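# Illustrative check of the `functools.reduce(...)` property above (in
# transformers it is exposed as `inputs_to_logits_ratio`): with the default
# conv strides (5, 2, 2, 2, 2, 2, 2) the feature extractor downsamples raw
# audio by 5 * 2**6 = 320, i.e. one encoder frame per 320 samples (20 ms at 16 kHz).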
import argparse

import torch

from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
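# A hedged invocation sketch (the script name and paths are placeholders):
#
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /tmp/mobilebert/mobilebert.ckpt \
#       --mobilebert_config_file /tmp/mobilebert/config.json \
#       --pytorch_dump_path /tmp/mobilebert/pytorch_model.bin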
"""simple docstring""" from random import randint, random def _A( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = False , lowerCAmelCase = False , lowerCAmelCase = 5 , ): A__ : Optional[int] = [[-1] * number_of_cells] # Create a highway without any car A__ : Dict = 0 A__ : str = max(lowerCAmelCase , 0 ) while i < number_of_cells: A__ : Dict = ( randint(0 , lowerCAmelCase ) if random_speed else initial_speed ) # Place the cars i += ( randint(1 , max_speed * 2 ) if random_frequency else frequency ) # Arbitrary number, may need tuning return highway def _A( lowerCAmelCase , lowerCAmelCase ): A__ : Any = 0 A__ : int = highway_now[car_index + 1 :] for cell in range(len(lowerCAmelCase ) ): # May need a better name for this if cells[cell] != -1: # If the cell is not empty then return distance # we have the distance we wanted distance += 1 # Here if the car is near the end of the highway return distance + get_distance(lowerCAmelCase , -1 ) def _A( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): A__ : Optional[int] = len(lowerCAmelCase ) # Beforce calculations, the highway is empty A__ : int = [-1] * number_of_cells for car_index in range(lowerCAmelCase ): if highway_now[car_index] != -1: # Add 1 to the current speed of the car and cap the speed A__ : List[Any] = min(highway_now[car_index] + 1 , lowerCAmelCase ) # Number of empty cell before the next car A__ : int = get_distance(lowerCAmelCase , lowerCAmelCase ) - 1 # We can't have the car causing an accident A__ : Optional[int] = min(next_highway[car_index] , lowerCAmelCase ) if random() < probability: # Randomly, a driver will slow down A__ : Any = max(next_highway[car_index] - 1 , 0 ) return next_highway def _A( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): A__ : Union[str, Any] = len(highway[0] ) for i in range(lowerCAmelCase ): A__ : Union[str, Any] = update(highway[i] , lowerCAmelCase , lowerCAmelCase ) A__ : Tuple = [-1] * number_of_cells for car_index in range(lowerCAmelCase ): A__ : Any = next_speeds_calculated[car_index] if speed != -1: # Change the position based on the speed (with % to create the loop) A__ : str = (car_index + speed) % number_of_cells # Commit the change of position A__ : Optional[int] = speed highway.append(lowerCAmelCase ) return highway if __name__ == "__main__": import doctest doctest.testmod()
import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def _a ( lowerCamelCase ): # vision encoder if "img_encoder.pos_embed" in name: lowerCamelCase : Tuple = name.replace("""img_encoder.pos_embed""", """vision_model.embeddings.position_embeddings""" ) if "img_encoder.patch_embed.proj" in name: lowerCamelCase : Union[str, Any] = name.replace("""img_encoder.patch_embed.proj""", """vision_model.embeddings.patch_embeddings.projection""" ) if "img_encoder.patch_embed.norm" in name: lowerCamelCase : Optional[int] = name.replace("""img_encoder.patch_embed.norm""", """vision_model.embeddings.layernorm""" ) if "img_encoder.layers" in name: lowerCamelCase : List[str] = name.replace("""img_encoder.layers""", """vision_model.encoder.stages""" ) if "blocks" in name and "res" not in name: lowerCamelCase : List[Any] = name.replace("""blocks""", """layers""" ) if "attn" in name and "pre_assign" not in name: lowerCamelCase : Optional[int] = name.replace("""attn""", """self_attn""" ) if "proj" in name and "self_attn" in name and "text" not in name: lowerCamelCase : Optional[int] = name.replace("""proj""", """out_proj""" ) if "pre_assign_attn.attn.proj" in name: lowerCamelCase : Any = name.replace("""pre_assign_attn.attn.proj""", """pre_assign_attn.attn.out_proj""" ) if "norm1" in name: lowerCamelCase : Optional[Any] = name.replace("""norm1""", """layer_norm1""" ) if "norm2" in name and "pre_assign" not in name: lowerCamelCase : Union[str, Any] = name.replace("""norm2""", """layer_norm2""" ) if "img_encoder.norm" in name: lowerCamelCase : Optional[int] = name.replace("""img_encoder.norm""", """vision_model.layernorm""" ) # text encoder if "text_encoder.token_embedding" in name: lowerCamelCase : int = name.replace("""text_encoder.token_embedding""", """text_model.embeddings.token_embedding""" ) if "text_encoder.positional_embedding" in name: lowerCamelCase : Optional[Any] = name.replace("""text_encoder.positional_embedding""", """text_model.embeddings.position_embedding.weight""" ) if "text_encoder.transformer.resblocks." in name: lowerCamelCase : Optional[Any] = name.replace("""text_encoder.transformer.resblocks.""", """text_model.encoder.layers.""" ) if "ln_1" in name: lowerCamelCase : Optional[Any] = name.replace("""ln_1""", """layer_norm1""" ) if "ln_2" in name: lowerCamelCase : str = name.replace("""ln_2""", """layer_norm2""" ) if "c_fc" in name: lowerCamelCase : Any = name.replace("""c_fc""", """fc1""" ) if "c_proj" in name: lowerCamelCase : Tuple = name.replace("""c_proj""", """fc2""" ) if "text_encoder" in name: lowerCamelCase : List[str] = name.replace("""text_encoder""", """text_model""" ) if "ln_final" in name: lowerCamelCase : Tuple = name.replace("""ln_final""", """final_layer_norm""" ) # projection layers if "img_projector.linear_hidden." in name: lowerCamelCase : Optional[int] = name.replace("""img_projector.linear_hidden.""", """visual_projection.""" ) if "img_projector.linear_out." 
in name: lowerCamelCase : Tuple = name.replace("""img_projector.linear_out.""", """visual_projection.3.""" ) if "text_projector.linear_hidden" in name: lowerCamelCase : Tuple = name.replace("""text_projector.linear_hidden""", """text_projection""" ) if "text_projector.linear_out" in name: lowerCamelCase : Tuple = name.replace("""text_projector.linear_out""", """text_projection.3""" ) return name def _a ( lowerCamelCase, lowerCamelCase ): for key in orig_state_dict.copy().keys(): lowerCamelCase : Tuple = orig_state_dict.pop(lowerCamelCase ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors lowerCamelCase : Any = key.split(""".""" ) lowerCamelCase , lowerCamelCase : Optional[Any] = int(key_split[2] ), int(key_split[4] ) lowerCamelCase : List[Any] = config.vision_config.hidden_size if "weight" in key: lowerCamelCase : int = val[:dim, :] lowerCamelCase : List[str] = val[dim : dim * 2, :] lowerCamelCase : Dict = val[-dim:, :] else: lowerCamelCase : List[Any] = val[:dim] lowerCamelCase : List[Any] = val[dim : dim * 2] lowerCamelCase : Tuple = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors lowerCamelCase : str = key.split(""".""" ) lowerCamelCase : Optional[int] = int(key_split[3] ) lowerCamelCase : List[str] = config.text_config.hidden_size if "weight" in key: lowerCamelCase : Optional[int] = val[:dim, :] lowerCamelCase : Any = val[ dim : dim * 2, : ] lowerCamelCase : Optional[Any] = val[-dim:, :] else: lowerCamelCase : Union[str, Any] = val[:dim] lowerCamelCase : Optional[int] = val[dim : dim * 2] lowerCamelCase : Union[str, Any] = val[-dim:] else: lowerCamelCase : List[Any] = rename_key(lowerCamelCase ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): lowerCamelCase : Any = val.squeeze_() else: lowerCamelCase : Union[str, Any] = val return orig_state_dict def _a ( ): lowerCamelCase : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase : List[str] = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase ).raw ) return im @torch.no_grad() def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase="groupvit-gcc-yfcc", lowerCamelCase=False ): lowerCamelCase : int = GroupViTConfig() lowerCamelCase : Dict = GroupViTModel(lowerCamelCase ).eval() lowerCamelCase : Optional[int] = torch.load(lowerCamelCase, map_location="""cpu""" )["""model"""] lowerCamelCase : Tuple = convert_state_dict(lowerCamelCase, lowerCamelCase ) lowerCamelCase , lowerCamelCase : Tuple = model.load_state_dict(lowerCamelCase, strict=lowerCamelCase ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowerCamelCase ) == 0) # verify result lowerCamelCase : int = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" ) lowerCamelCase : int = prepare_img() lowerCamelCase : int = processor(text=["""a photo of a cat""", """a photo of a dog"""], images=lowerCamelCase, padding=lowerCamelCase, return_tensors="""pt""" ) with torch.no_grad(): lowerCamelCase : int = model(**lowerCamelCase ) if model_name == "groupvit-gcc-yfcc": lowerCamelCase : Any = torch.tensor([[1_3.3_5_2_3, 
6.3_6_2_9]] ) elif model_name == "groupvit-gcc-redcaps": lowerCamelCase : Any = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] ) else: raise ValueError(F'''Model name {model_name} not supported.''' ) assert torch.allclose(outputs.logits_per_image, lowerCamelCase, atol=1e-3 ) processor.save_pretrained(lowerCamelCase ) model.save_pretrained(lowerCamelCase ) print("""Successfully saved processor and model to""", lowerCamelCase ) if push_to_hub: print("""Pushing to the hub...""" ) processor.push_to_hub(lowerCamelCase, organization="""nielsr""" ) model.push_to_hub(lowerCamelCase, organization="""nielsr""" ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model.""" ) parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""") parser.add_argument( """--model_name""", default="""groupvit-gcc-yfcc""", type=str, help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""", ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""", ) args = parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
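# Aside: a toy sketch (illustrative sizes only, not part of the script above) of
# the qkv splitting performed during state-dict conversion — a fused
# (3 * dim, dim) projection is sliced row-wise into separate query, key and
# value matrices.
import torch

dim = 4
fused_qkv_weight = torch.randn(3 * dim, dim)
q_weight = fused_qkv_weight[:dim, :]
k_weight = fused_qkv_weight[dim : dim * 2, :]
v_weight = fused_qkv_weight[-dim:, :]
assert q_weight.shape == k_weight.shape == v_weight.shape == (dim, dim)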
import os import re import warnings from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_ta import TaTokenizer else: SCREAMING_SNAKE_CASE : Tuple = None SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Tuple = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} SCREAMING_SNAKE_CASE : List[str] = { "vocab_file": { "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model", "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model", "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model", "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model", "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model", }, "tokenizer_file": { "t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json", "t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json", "t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json", "t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json", "t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json", }, } # TODO(PVP) - this should be removed in Transformers v5 SCREAMING_SNAKE_CASE : str = { "t5-small": 5_12, "t5-base": 5_12, "t5-large": 5_12, "t5-3b": 5_12, "t5-11b": 5_12, } class snake_case__ ( __SCREAMING_SNAKE_CASE ): UpperCAmelCase : Tuple = VOCAB_FILES_NAMES UpperCAmelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase : Optional[Any] = ["""input_ids""", """attention_mask"""] UpperCAmelCase : Optional[Any] = TaTokenizer UpperCAmelCase : List[int] = [] def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_="</s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_=100 , UpperCamelCase_=None , **UpperCamelCase_ , ) -> Optional[Any]: """simple docstring""" if extra_ids > 0 and additional_special_tokens is None: a_ : str = [f"""<extra_id_{i}>""" for i in range(UpperCamelCase_ )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra special tokens a_ : Any = len(set(filter(lambda UpperCamelCase_ : bool("""extra_id_""" in str(UpperCamelCase_ ) ) , UpperCamelCase_ ) ) ) if extra_tokens != extra_ids: raise ValueError( f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are""" """ provided to T5Tokenizer. 
In this case the additional_special_tokens must include the extra_ids""" """ tokens""" ) super().__init__( UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , extra_ids=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ , ) a_ : str = vocab_file a_ : Any = False if not self.vocab_file else True a_ : List[str] = extra_ids @staticmethod def A ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Tuple: """simple docstring""" if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes: a_ : int = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( """This tokenizer was incorrectly instantiated with a model max length of""" f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this""" """ behavior is kept to avoid breaking backwards compatibility when padding/encoding with""" """ `truncation is True`.\n- Be aware that you SHOULD NOT rely on""" f""" {pretrained_model_name_or_path} automatically truncating your input to""" f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences""" f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with""" """ `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please""" """ instantiate this tokenizer with `model_max_length` set to your preferred value.""" , UpperCamelCase_ , ) return max_model_length def A ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> Union[str, Any]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(UpperCamelCase_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return a_ : Optional[Any] = os.path.join( UpperCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ): copyfile(self.vocab_file , UpperCamelCase_ ) logger.info(f"""Copy vocab file to {out_vocab_file}""" ) return (out_vocab_file,) def A ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> str: """simple docstring""" a_ : Optional[int] = token_ids_a + [self.eos_token_id] if token_ids_a is None: return self.prefix_tokens + token_ids_a else: a_ : Any = token_ids_a + [self.eos_token_id] return self.prefix_tokens + token_ids_a + token_ids_a def A ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> int: """simple docstring""" a_ : Tuple = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def A ( self ) -> List[Any]: """simple docstring""" return list( set(filter(lambda UpperCamelCase_ : bool(re.search(R"""<extra_id_\d+>""" , UpperCamelCase_ ) ) is not None , self.additional_special_tokens ) ) ) def A ( self ) -> List[str]: """simple docstring""" return [self.convert_tokens_to_ids(UpperCamelCase_ ) for token in self.get_sentinel_tokens()]
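# Aside: a standalone sketch of the regex filter the tokenizer above uses to
# recover its sentinel tokens — extra_ids adds tokens of the form <extra_id_N>,
# which are picked out of the special tokens with a simple search.
import re

tokens = ["<extra_id_0>", "<pad>", "</s>", "<extra_id_99>"]
sentinels = [t for t in tokens if re.search(r"<extra_id_\d+>", t) is not None]
assert sentinels == ["<extra_id_0>", "<extra_id_99>"]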
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import flax
import jax.numpy as jnp
from jax import random

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin


@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(self, state: KarrasVeSchedulerState, sample: jnp.ndarray, sigma: float, key):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        sample_prev: jnp.ndarray,
        derivative: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        raise NotImplementedError()
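# Aside: a standalone sketch (default config values assumed) of the schedule
# built in set_timesteps above — a geometric interpolation between sigma_max
# and sigma_min, indexed by the reversed timesteps.
import jax.numpy as jnp

sigma_min, sigma_max, num_inference_steps = 0.02, 100.0, 50
timesteps = jnp.arange(0, num_inference_steps)[::-1]
schedule = jnp.array(
    [sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (num_inference_steps - 1)) for i in timesteps],
    dtype=jnp.float32,
)
assert schedule.shape == (num_inference_steps,)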
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_roberta_prelayernorm": [
        "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "RobertaPreLayerNormConfig",
        "RobertaPreLayerNormOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
        "ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaPreLayerNormForCausalLM",
        "RobertaPreLayerNormForMaskedLM",
        "RobertaPreLayerNormForMultipleChoice",
        "RobertaPreLayerNormForQuestionAnswering",
        "RobertaPreLayerNormForSequenceClassification",
        "RobertaPreLayerNormForTokenClassification",
        "RobertaPreLayerNormModel",
        "RobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
        "TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaPreLayerNormForCausalLM",
        "TFRobertaPreLayerNormForMaskedLM",
        "TFRobertaPreLayerNormForMultipleChoice",
        "TFRobertaPreLayerNormForQuestionAnswering",
        "TFRobertaPreLayerNormForSequenceClassification",
        "TFRobertaPreLayerNormForTokenClassification",
        "TFRobertaPreLayerNormMainLayer",
        "TFRobertaPreLayerNormModel",
        "TFRobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
        "FlaxRobertaPreLayerNormForCausalLM",
        "FlaxRobertaPreLayerNormForMaskedLM",
        "FlaxRobertaPreLayerNormForMultipleChoice",
        "FlaxRobertaPreLayerNormForQuestionAnswering",
        "FlaxRobertaPreLayerNormForSequenceClassification",
        "FlaxRobertaPreLayerNormForTokenClassification",
        "FlaxRobertaPreLayerNormModel",
        "FlaxRobertaPreLayerNormPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_roberta_prelayernorm import (
        ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
        RobertaPreLayerNormConfig,
        RobertaPreLayerNormOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta_prelayernorm import (
            ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaPreLayerNormForCausalLM,
            RobertaPreLayerNormForMaskedLM,
            RobertaPreLayerNormForMultipleChoice,
            RobertaPreLayerNormForQuestionAnswering,
            RobertaPreLayerNormForSequenceClassification,
            RobertaPreLayerNormForTokenClassification,
            RobertaPreLayerNormModel,
            RobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta_prelayernorm import (
            TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaPreLayerNormForCausalLM,
            TFRobertaPreLayerNormForMaskedLM,
            TFRobertaPreLayerNormForMultipleChoice,
            TFRobertaPreLayerNormForQuestionAnswering,
            TFRobertaPreLayerNormForSequenceClassification,
            TFRobertaPreLayerNormForTokenClassification,
            TFRobertaPreLayerNormMainLayer,
            TFRobertaPreLayerNormModel,
            TFRobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta_prelayernorm import (
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
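# Aside: a minimal illustration (simplified, not transformers' actual
# _LazyModule) of the lazy-import pattern above — attribute access triggers the
# submodule import on first use instead of at package import time.
import importlib
import types


class _LazyStub(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for module, names in self._import_structure.items():
            if attr in names:
                return getattr(importlib.import_module(module), attr)
        raise AttributeError(attr)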
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
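# Aside: a quick property check for the gen_gaussian_kernel defined above
# (numpy assumed available) — the kernel is symmetric about its centre and
# peaks there.
import numpy as np

kernel = gen_gaussian_kernel(3, 1)
assert kernel.shape == (3, 3)
assert np.isclose(kernel[0, 0], kernel[2, 2])
assert kernel[1, 1] == kernel.max()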
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__) if is_vision_available(): import PIL class __UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase : int = ["""pixel_values"""] def __init__( self , _A = True , _A = None , _A = PILImageResampling.BICUBIC , _A = True , _A = None , _A = True , _A = 1 / 2_5_5 , _A = True , _A = None , _A = None , _A = True , **_A , ): '''simple docstring''' super().__init__(**_A ) _SCREAMING_SNAKE_CASE =size if size is not None else {"""shortest_edge""": 2_2_4} _SCREAMING_SNAKE_CASE =get_size_dict(_A , default_to_square=_A ) _SCREAMING_SNAKE_CASE =crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4} _SCREAMING_SNAKE_CASE =get_size_dict(_A , default_to_square=_A , param_name='''crop_size''' ) _SCREAMING_SNAKE_CASE =do_resize _SCREAMING_SNAKE_CASE =size _SCREAMING_SNAKE_CASE =resample _SCREAMING_SNAKE_CASE =do_center_crop _SCREAMING_SNAKE_CASE =crop_size _SCREAMING_SNAKE_CASE =do_rescale _SCREAMING_SNAKE_CASE =rescale_factor _SCREAMING_SNAKE_CASE =do_normalize _SCREAMING_SNAKE_CASE =image_mean if image_mean is not None else OPENAI_CLIP_MEAN _SCREAMING_SNAKE_CASE =image_std if image_std is not None else OPENAI_CLIP_STD _SCREAMING_SNAKE_CASE =do_convert_rgb def UpperCamelCase_ ( self , _A , _A , _A = PILImageResampling.BICUBIC , _A = None , **_A , ): '''simple docstring''' _SCREAMING_SNAKE_CASE =get_size_dict(_A , default_to_square=_A ) if "shortest_edge" not in size: raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) _SCREAMING_SNAKE_CASE =get_resize_output_image_size(_A , size=size['''shortest_edge'''] , default_to_square=_A ) return resize(_A , size=_A , resample=_A , data_format=_A , **_A ) def UpperCamelCase_ ( self , _A , _A , _A = None , **_A , ): '''simple docstring''' _SCREAMING_SNAKE_CASE =get_size_dict(_A ) if "height" not in size or "width" not in size: raise ValueError(f"""The `size` parameter must contain the keys (height, width). 
Got {size.keys()}""" ) return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A ) def UpperCamelCase_ ( self , _A , _A , _A = None , **_A , ): '''simple docstring''' return rescale(_A , scale=_A , data_format=_A , **_A ) def UpperCamelCase_ ( self , _A , _A , _A , _A = None , **_A , ): '''simple docstring''' return normalize(_A , mean=_A , std=_A , data_format=_A , **_A ) def UpperCamelCase_ ( self , _A , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = ChannelDimension.FIRST , **_A , ): '''simple docstring''' _SCREAMING_SNAKE_CASE =do_resize if do_resize is not None else self.do_resize _SCREAMING_SNAKE_CASE =size if size is not None else self.size _SCREAMING_SNAKE_CASE =get_size_dict(_A , param_name='''size''' , default_to_square=_A ) _SCREAMING_SNAKE_CASE =resample if resample is not None else self.resample _SCREAMING_SNAKE_CASE =do_center_crop if do_center_crop is not None else self.do_center_crop _SCREAMING_SNAKE_CASE =crop_size if crop_size is not None else self.crop_size _SCREAMING_SNAKE_CASE =get_size_dict(_A , param_name='''crop_size''' , default_to_square=_A ) _SCREAMING_SNAKE_CASE =do_rescale if do_rescale is not None else self.do_rescale _SCREAMING_SNAKE_CASE =rescale_factor if rescale_factor is not None else self.rescale_factor _SCREAMING_SNAKE_CASE =do_normalize if do_normalize is not None else self.do_normalize _SCREAMING_SNAKE_CASE =image_mean if image_mean is not None else self.image_mean _SCREAMING_SNAKE_CASE =image_std if image_std is not None else self.image_std _SCREAMING_SNAKE_CASE =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb _SCREAMING_SNAKE_CASE =make_list_of_images(_A ) if not valid_images(_A ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: _SCREAMING_SNAKE_CASE =[convert_to_rgb(_A ) for image in images] # All transformations expect numpy arrays. _SCREAMING_SNAKE_CASE =[to_numpy_array(_A ) for image in images] if do_resize: _SCREAMING_SNAKE_CASE =[self.resize(image=_A , size=_A , resample=_A ) for image in images] if do_center_crop: _SCREAMING_SNAKE_CASE =[self.center_crop(image=_A , size=_A ) for image in images] if do_rescale: _SCREAMING_SNAKE_CASE =[self.rescale(image=_A , scale=_A ) for image in images] if do_normalize: _SCREAMING_SNAKE_CASE =[self.normalize(image=_A , mean=_A , std=_A ) for image in images] _SCREAMING_SNAKE_CASE =[to_channel_dimension_format(_A , _A ) for image in images] _SCREAMING_SNAKE_CASE ={"""pixel_values""": images} return BatchFeature(data=_A , tensor_type=_A )
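# Aside: a plain-Python sketch (approximating, not reproducing, transformers'
# get_resize_output_image_size) of the shortest-edge resize used above — the
# smaller side is scaled to `shortest_edge` and the other side keeps the
# aspect ratio.
def shortest_edge_output_size(height, width, shortest_edge):
    short, long = (height, width) if height <= width else (width, height)
    new_long = int(shortest_edge * long / short)
    return (shortest_edge, new_long) if height <= width else (new_long, shortest_edge)


assert shortest_edge_output_size(480, 640, 224) == (224, 298)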
import pytest


DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = """
import json
import os

import datasets


REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}


class __DummyDataset1__(datasets.GeneratorBasedBuilder):

    def _info(self):
        features = datasets.Features(
            {
                "tokens": datasets.Sequence(datasets.Value("string")),
                "ner_tags": datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=[
                            "O",
                            "B-PER",
                            "I-PER",
                            "B-ORG",
                            "I-ORG",
                            "B-LOC",
                            "I-LOC",
                        ]
                    )
                ),
                "langs": datasets.Sequence(datasets.Value("string")),
                "spans": datasets.Sequence(datasets.Value("string")),
            }
        )
        return datasets.DatasetInfo(features=features)

    def _split_generators(self, dl_manager):
        dl_path = dl_manager.download(URLS)
        return [
            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
        ]

    def _generate_examples(self, filepath):
        with open(filepath, "r", encoding="utf-8") as f:
            for i, line in enumerate(f):
                yield i, json.loads(line)
"""


@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
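# Aside: the directory returned by dataset_loading_script_dir above can be
# passed straight to datasets.load_dataset in a test (hedged sketch; network
# access is needed for the raw jsonl files):
#
#   def test_load(dataset_loading_script_dir):
#       import datasets
#       ds = datasets.load_dataset(dataset_loading_script_dir, split="train")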
"""simple docstring""" import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class a : def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : List[str]="resnet50" , __SCREAMING_SNAKE_CASE : Any=3 , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3 , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : str=True , ) -> List[Any]: lowerCamelCase_ = parent lowerCamelCase_ = out_indices if out_indices is not None else [4] lowerCamelCase_ = stage_names lowerCamelCase_ = out_features lowerCamelCase_ = backbone lowerCamelCase_ = batch_size lowerCamelCase_ = image_size lowerCamelCase_ = num_channels lowerCamelCase_ = use_pretrained_backbone lowerCamelCase_ = is_training def UpperCamelCase ( self : List[Any] ) -> Tuple: lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase_ = self.get_config() return config, pixel_values def UpperCamelCase ( self : Tuple ) -> Union[str, Any]: return TimmBackboneConfig( image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , ) def UpperCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[Any] ) -> int: lowerCamelCase_ = TimmBackbone(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): lowerCamelCase_ = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , ) def UpperCamelCase ( self : Any ) -> List[str]: lowerCamelCase_ = self.prepare_config_and_inputs() lowerCamelCase_ = config_and_inputs lowerCamelCase_ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch @require_timm class a ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): SCREAMING_SNAKE_CASE : List[str] = (TimmBackbone,) if is_torch_available() else () SCREAMING_SNAKE_CASE : Dict = {"""feature-extraction""": TimmBackbone} if is_torch_available() else {} SCREAMING_SNAKE_CASE : Optional[Any] = False SCREAMING_SNAKE_CASE : Optional[Any] = False SCREAMING_SNAKE_CASE : Tuple = False SCREAMING_SNAKE_CASE : int = False def UpperCamelCase ( self : Union[str, Any] ) -> List[str]: lowerCamelCase_ = TimmBackboneModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE ) def UpperCamelCase ( self : List[Any] ) -> Union[str, Any]: self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() 
self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase ( self : Dict ) -> Union[str, Any]: lowerCamelCase_ = """resnet18""" lowerCamelCase_ = """microsoft/resnet-18""" lowerCamelCase_ = AutoBackbone.from_pretrained(__SCREAMING_SNAKE_CASE , use_timm_backbone=__SCREAMING_SNAKE_CASE ) lowerCamelCase_ = AutoBackbone.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices , (-1,) ) self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] ) lowerCamelCase_ = AutoBackbone.from_pretrained(__SCREAMING_SNAKE_CASE , use_timm_backbone=__SCREAMING_SNAKE_CASE , out_indices=[1, 2, 3] ) lowerCamelCase_ = AutoBackbone.from_pretrained(__SCREAMING_SNAKE_CASE , out_indices=[1, 2, 3] ) self.assertEqual(timm_model.out_indices , transformers_model.out_indices ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) @unittest.skip('TimmBackbone doesn\'t support feed forward chunking' ) def UpperCamelCase ( self : Dict ) -> str: pass @unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' ) def UpperCamelCase ( self : Any ) -> int: pass @unittest.skip('TimmBackbone initialization is managed on the timm side' ) def UpperCamelCase ( self : str ) -> Any: pass @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' ) def UpperCamelCase ( self : Dict ) -> Optional[Any]: pass @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' ) def UpperCamelCase ( self : int ) -> List[Any]: pass @unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' ) def UpperCamelCase ( self : Optional[Any] ) -> str: pass @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' ) def UpperCamelCase ( self : int ) -> Union[str, Any]: pass @unittest.skip('model weights aren\'t tied in TimmBackbone.' ) def UpperCamelCase ( self : List[Any] ) -> Optional[Any]: pass @unittest.skip('model weights aren\'t tied in TimmBackbone.' ) def UpperCamelCase ( self : Union[str, Any] ) -> Tuple: pass @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' ) def UpperCamelCase ( self : Any ) -> List[Any]: pass @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' ) def UpperCamelCase ( self : Dict ) -> Any: pass @unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' ) def UpperCamelCase ( self : List[Any] ) -> int: pass @unittest.skip('TimmBackbone doesn\'t support output_attentions.' ) def UpperCamelCase ( self : List[Any] ) -> Optional[int]: pass @unittest.skip('Safetensors is not supported by timm.' ) def UpperCamelCase ( self : Dict ) -> Optional[Any]: pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def UpperCamelCase ( self : Union[str, Any] ) -> List[Any]: pass def UpperCamelCase ( self : Optional[int] ) -> str: lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ = model_class(__SCREAMING_SNAKE_CASE ) lowerCamelCase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase_ = [*signature.parameters.keys()] lowerCamelCase_ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE ) def UpperCamelCase ( self : int ) -> str: lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = True lowerCamelCase_ = self.has_attentions # no need to test all models as different heads yield the same functionality lowerCamelCase_ = self.all_model_classes[0] lowerCamelCase_ = model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) lowerCamelCase_ = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowerCamelCase_ = model(**__SCREAMING_SNAKE_CASE ) lowerCamelCase_ = outputs[0][-1] # Encoder-/Decoder-only models lowerCamelCase_ = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: lowerCamelCase_ = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(hidden_states.grad ) if self.has_attentions: self.assertIsNotNone(attentions.grad ) def UpperCamelCase ( self : Union[str, Any] ) -> Tuple: lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ = model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() lowerCamelCase_ = model(**__SCREAMING_SNAKE_CASE ) self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) ) self.assertEqual(len(model.channels ) , len(config.out_indices ) ) # Check output of last stage is taken if out_features=None, out_indices=None lowerCamelCase_ = copy.deepcopy(__SCREAMING_SNAKE_CASE ) lowerCamelCase_ = None lowerCamelCase_ = model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() lowerCamelCase_ = model(**__SCREAMING_SNAKE_CASE ) self.assertEqual(len(result.feature_maps ) , 1 ) self.assertEqual(len(model.channels ) , 1 ) # Check backbone can be initialized with fresh weights lowerCamelCase_ = copy.deepcopy(__SCREAMING_SNAKE_CASE ) lowerCamelCase_ = False lowerCamelCase_ = model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() lowerCamelCase_ = model(**__SCREAMING_SNAKE_CASE )
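# Aside: a tiny illustration (assumed stage names) of the out_indices
# convention checked in the tests above — timm-backed backbones default to
# (-1,), transformers backbones to [len(stage_names) - 1], and both resolve to
# the same final stage.
stage_names = ["stem", "stage1", "stage2", "stage3", "stage4"]
timm_out_indices = (-1,)
transformers_out_indices = [len(stage_names) - 1]
assert stage_names[timm_out_indices[0]] == stage_names[transformers_out_indices[0]]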
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image


if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
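# Aside: a minimal round-trip sketch for pt_to_pil above — a batch of tensors
# in [-1, 1] becomes a list of 8-bit PIL images (torch assumed available).
import torch

pil_images = pt_to_pil(torch.rand(2, 3, 8, 8) * 2 - 1)
assert len(pil_images) == 2
assert pil_images[0].size == (8, 8)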
"""simple docstring""" import torch from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel class __snake_case (__SCREAMING_SNAKE_CASE ): __a = """M-CLIP""" def __init__( self: Optional[int] , A_: Dict=10_24 , A_: List[Any]=7_68 , **A_: Any ): __lowerCamelCase = transformerDimSize __lowerCamelCase = imageDimSize super().__init__(**A_ ) class __snake_case (__SCREAMING_SNAKE_CASE ): __a = MCLIPConfig def __init__( self: Any , A_: Tuple , *A_: Tuple , **A_: List[str] ): super().__init__(A_ , *A_ , **A_ ) __lowerCamelCase = XLMRobertaModel(A_ ) __lowerCamelCase = torch.nn.Linear( in_features=config.transformerDimensions , out_features=config.numDims ) def __a ( self: Optional[Any] , A_: Optional[Any] , A_: str ): __lowerCamelCase = self.transformer(input_ids=A_ , attention_mask=A_ )[0] __lowerCamelCase = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None] return self.LinearTransformation(A_ ), embs
from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
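# Aside: the residual mixing performed above, reduced to a toy sketch — each
# transformer contributes its residual, the residuals are blended with
# mix_ratio, and the input is added back.
import torch

input_states = torch.randn(1, 3)
residuals = [torch.randn(1, 3), torch.randn(1, 3)]
mix_ratio = 0.5
output_states = residuals[0] * mix_ratio + residuals[1] * (1 - mix_ratio) + input_states
assert output_states.shape == input_states.shape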
"""simple docstring""" import argparse import fairseq import torch from torch import nn from transformers import ( MBartaaTokenizer, MBartConfig, MBartForCausalLM, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() lowercase__ : Optional[int] = logging.get_logger(__name__) lowercase__ : List[Any] = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } lowercase__ : List[str] = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def __lowercase ( _a , _a , _a , _a , _a ): for attribute in key.split('''.''' ): snake_case_ : Optional[int] = getattr(_a , _a ) if weight_type is not None: snake_case_ : str = getattr(_a , _a ).shape else: snake_case_ : List[Any] = hf_pointer.shape assert hf_shape == value.shape, ( f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": snake_case_ : Dict = value elif weight_type == "weight_g": snake_case_ : Optional[Any] = value elif weight_type == "weight_v": snake_case_ : str = value elif weight_type == "bias": snake_case_ : Tuple = value else: snake_case_ : Tuple = value logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." 
) def __lowercase ( _a , _a ): snake_case_ : Optional[int] = [] snake_case_ : Optional[Any] = fairseq_model.state_dict() snake_case_ : Union[str, Any] = hf_model.feature_extractor snake_case_ : Any = hf_model.adapter for name, value in fairseq_dict.items(): snake_case_ : Dict = False if "conv_layers" in name: load_conv_layer( _a , _a , _a , _a , hf_model.config.feat_extract_norm == '''group''' , ) snake_case_ : int = True elif any(x in name for x in ['''adaptor''', '''w2v_encoder.proj.''', '''w2v_proj_ln.'''] ): load_adapter(_a , _a , _a , _a ) snake_case_ : List[Any] = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: snake_case_ : Optional[Any] = True if "*" in mapped_key: snake_case_ : Union[str, Any] = name.split(_a )[0].split('''.''' )[-2] snake_case_ : Tuple = mapped_key.replace('''*''' , _a ) if "weight_g" in name: snake_case_ : Union[str, Any] = """weight_g""" elif "weight_v" in name: snake_case_ : Dict = """weight_v""" elif "bias" in name: snake_case_ : Optional[Any] = """bias""" elif "weight" in name: snake_case_ : Union[str, Any] = """weight""" else: snake_case_ : int = None set_recursively(_a , _a , _a , _a , _a ) continue if not is_used: unused_weights.append(_a ) logger.warning(f"Unused weights: {unused_weights}" ) def __lowercase ( _a , _a , _a , _a , _a ): snake_case_ : Union[str, Any] = full_name.split('''conv_layers.''' )[-1] snake_case_ : Tuple = name.split('''.''' ) snake_case_ : Union[str, Any] = int(items[0] ) snake_case_ : str = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) snake_case_ : int = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) snake_case_ : Optional[Any] = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) snake_case_ : str = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) snake_case_ : Tuple = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." 
) else: unused_weights.append(_a ) def __lowercase ( _a , _a , _a , _a ): snake_case_ : str = full_name.split('''adaptor.''' )[-1] snake_case_ : List[str] = name.split('''.''' ) if items[1].isdigit(): snake_case_ : Optional[int] = int(items[1] ) else: snake_case_ : List[Any] = None if "adaptor" not in full_name: if "proj_ln" in full_name: # has to be layer norm if "bias" in name: assert ( value.shape == adapter.proj_layer_norm.bias.data.shape ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found." snake_case_ : Optional[int] = value logger.info(f"Adapter proj layer norm bias was initialized from {full_name}." ) if "weight" in name: assert ( value.shape == adapter.proj_layer_norm.weight.data.shape ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found." snake_case_ : List[Any] = value else: # has to be projection layer if "bias" in name: assert ( value.shape == adapter.proj.bias.data.shape ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found." snake_case_ : Optional[int] = value logger.info(f"Adapter proj layer bias was initialized from {full_name}." ) if "weight" in name: assert ( value.shape == adapter.proj.weight.data.shape ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found." snake_case_ : List[str] = value logger.info(f"Adapter proj layer weight was initialized from {full_name}." ) elif isinstance(_a , _a ): if "bias" in name: assert ( value.shape == adapter.layers[layer_id].conv.bias.data.shape ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found." snake_case_ : str = value logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}." ) elif "weight" in name: assert ( value.shape == adapter.layers[layer_id].conv.weight.data.shape ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found." snake_case_ : Union[str, Any] = value logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}." 
) else: unused_weights.append(_a ) def __lowercase ( _a ): snake_case_ : int = emb.weight.shape snake_case_ : Optional[Any] = nn.Linear(_a , _a , bias=_a ) snake_case_ : Optional[Any] = emb.weight.data return lin_layer @torch.no_grad() def __lowercase ( _a , _a , _a , _a , _a , _a , _a , _a , _a , _a , _a , ): snake_case_ : List[str] = WavaVecaConfig.from_pretrained( _a , add_adapter=_a , adapter_stride=_a , adapter_kernel_size=_a , use_auth_token=_a , output_hidden_size=_a , ) snake_case_ : List[Any] = MBartConfig.from_pretrained(_a ) # load model snake_case_ : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={ '''config_yaml''': config_yaml_path, '''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path, '''load_pretrained_decoder_from''': None, } , ) snake_case_ : Optional[int] = model[0].eval() # load feature extractor snake_case_ : Any = WavaVecaFeatureExtractor.from_pretrained(_a , use_auth_token=_a ) # set weights for wav2vec2 encoder snake_case_ : Dict = WavaVecaModel(_a ) recursively_load_weights_wavaveca(model.encoder , _a ) # load decoder weights snake_case_ : int = MBartForCausalLM(_a ) snake_case_ : Union[str, Any] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_a ) logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}" ) logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" ) snake_case_ : Any = SpeechEncoderDecoderModel(encoder=_a , decoder=_a ) snake_case_ : Dict = False snake_case_ : str = MBartaaTokenizer(_a ) tokenizer.save_pretrained(_a ) snake_case_ : Dict = hf_wavavec.config.to_dict() snake_case_ : Dict = tokenizer.pad_token_id snake_case_ : Optional[Any] = tokenizer.bos_token_id snake_case_ : List[Any] = tokenizer.eos_token_id snake_case_ : Optional[Any] = """mbart50""" snake_case_ : List[Any] = """wav2vec2""" snake_case_ : Union[str, Any] = tokenizer.eos_token_id snake_case_ : Any = 250_004 snake_case_ : Optional[Any] = tokenizer.eos_token_id snake_case_ : Union[str, Any] = SpeechEncoderDecoderConfig.from_dict(_a ) hf_wavavec.save_pretrained(_a ) feature_extractor.save_pretrained(_a ) if __name__ == "__main__": lowercase__ : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_yaml_path''', default=None, type=str, help='''Path to yaml file of fine-tuned model''') parser.add_argument( '''--encoder_config_path''', default='''facebook/wav2vec2-xls-r-1b''', type=str, help='''Path to hf encoder wav2vec2 checkpoint config''', ) parser.add_argument( '''--decoder_config_path''', default='''facebook/mbart-large-50-one-to-many-mmt''', type=str, help='''Path to hf decoder checkpoint config''', ) parser.add_argument('''--add_adapter''', default=True, type=bool, help='''whethere to add model adapter layers''') parser.add_argument('''--adapter_stride''', default=2, type=int, help='''stride of adapter layers''') parser.add_argument('''--adapter_kernel_size''', default=3, type=int, help='''kernel size of adapter layers''') parser.add_argument('''--encoder_output_dim''', default=10_24, type=int, help='''encoder output 
dim''') parser.add_argument('''--start_token_id''', default=25_00_04, type=int, help='''`decoder_start_token_id` of model config''') lowercase__ : str = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, args.config_yaml_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, add_adapter=args.add_adapter, adapter_kernel_size=args.adapter_kernel_size, adapter_stride=args.adapter_stride, decoder_start_token_id=args.start_token_id, encoder_output_dim=args.encoder_output_dim, )
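# Aside: a hypothetical invocation of this conversion script (the script
# filename and all paths below are placeholders, not taken from the repository):
#
#   python convert_wav2vec2_mbart.py \
#       --checkpoint_path ./checkpoint_best.pt \
#       --dict_path ./dict.mbart50.txt \
#       --config_yaml_path ./config.yaml \
#       --pytorch_dump_folder_path ./wav2vec2-xls-r-mbart50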
import unittest from transformers import BertGenerationTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin _lowerCamelCase ="""▁""" _lowerCamelCase =get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase): _UpperCAmelCase : str = BertGenerationTokenizer _UpperCAmelCase : Tuple = False _UpperCAmelCase : List[Any] = True def UpperCamelCase__ ( self ): super().setUp() lowerCamelCase : int = BertGenerationTokenizer(__magic_name__ , keep_accents=__magic_name__ ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = """<s>""" lowerCamelCase : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<unk>""" ) self.assertEqual(vocab_keys[1] , """<s>""" ) self.assertEqual(vocab_keys[-1] , """<pad>""" ) self.assertEqual(len(__magic_name__ ) , 1_0_0_2 ) def UpperCamelCase__ ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = BertGenerationTokenizer(__magic_name__ , keep_accents=__magic_name__ ) lowerCamelCase : Optional[Any] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(__magic_name__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__magic_name__ ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , ) lowerCamelCase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __magic_name__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) lowerCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__magic_name__ ) self.assertListEqual( __magic_name__ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , ) lowerCamelCase : int = tokenizer.convert_ids_to_tokens(__magic_name__ ) self.assertListEqual( __magic_name__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) @cached_property def UpperCamelCase__ ( self ): return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" ) @slow def UpperCamelCase__ ( self ): lowerCamelCase : List[Any] = """Hello World!""" lowerCamelCase : Any = [1_8_5_3_6, 2_2_6_0, 1_0_1] self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) ) @slow def UpperCamelCase__ ( self ): lowerCamelCase : str = ( """This is a very long text with a lot of weird characters, such 
as: . , ~ ? ( ) \" [ ] ! : - . Also we will""" """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth""" ) lowerCamelCase : str = [ 8_7_1, 4_1_9, 3_5_8, 9_4_6, 9_9_1, 2_5_2_1, 4_5_2, 3_5_8, 1_3_5_7, 3_8_7, 7_7_5_1, 3_5_3_6, 1_1_2, 9_8_5, 4_5_6, 1_2_6, 8_6_5, 9_3_8, 5_4_0_0, 5_7_3_4, 4_5_8, 1_3_6_8, 4_6_7, 7_8_6, 2_4_6_2, 5_2_4_6, 1_1_5_9, 6_3_3, 8_6_5, 4_5_1_9, 4_5_7, 5_8_2, 8_5_2, 2_5_5_7, 4_2_7, 9_1_6, 5_0_8, 4_0_5, 3_4_3_2_4, 4_9_7, 3_9_1, 4_0_8, 1_1_3_4_2, 1_2_4_4, 3_8_5, 1_0_0, 9_3_8, 9_8_5, 4_5_6, 5_7_4, 3_6_2, 1_2_5_9_7, 3_2_0_0, 3_1_2_9, 1_1_7_2, ] self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) ) @require_torch @slow def UpperCamelCase__ ( self ): import torch from transformers import BertGenerationConfig, BertGenerationEncoder # Build sequence lowerCamelCase : Union[str, Any] = list(self.big_tokenizer.get_vocab().keys() )[:1_0] lowerCamelCase : Dict = """ """.join(__magic_name__ ) lowerCamelCase : Any = self.big_tokenizer.encode_plus(__magic_name__ , return_tensors="""pt""" , return_token_type_ids=__magic_name__ ) lowerCamelCase : List[str] = self.big_tokenizer.batch_encode_plus( [sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=__magic_name__ ) lowerCamelCase : Tuple = BertGenerationConfig() lowerCamelCase : Optional[int] = BertGenerationEncoder(__magic_name__ ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**__magic_name__ ) model(**__magic_name__ ) @slow def UpperCamelCase__ ( self ): # fmt: off lowerCamelCase : Any = {"""input_ids""": [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__magic_name__ , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
681
0
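A hedged sketch of the invariant the tokenizer/encoder test above relies on: every id the tokenizer can produce must index into the model's input-embedding table, so the embedding row count has to be at least the vocabulary size. The tiny config values below are my own, chosen so the check runs instantly and downloads nothing:

import torch
from transformers import BertGenerationConfig, BertGenerationEncoder

# small random model; vocab_size keeps its default, everything else is shrunk
config = BertGenerationConfig(hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=64)
model = BertGenerationEncoder(config)
# the same assertion the test makes: ids can never index out of the embeddings
assert model.get_input_embeddings().weight.shape[0] >= config.vocab_size
with torch.no_grad():
    out = model(input_ids=torch.tensor([[101, 2023, 102]]))
print(out.last_hidden_state.shape)  # torch.Size([1, 3, 32])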
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class _UpperCamelCase ( unittest.TestCase): '''simple docstring''' def a__ ( self ) -> Optional[Any]: lowercase : Union[str, Any] = tempfile.mkdtemp() lowercase : List[Any] = BlipImageProcessor() lowercase : Union[str, Any] = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" ) lowercase : List[Any] = BlipProcessor(a_ , a_ ) processor.save_pretrained(self.tmpdirname ) def a__ ( self , **a_ ) -> List[Any]: return AutoProcessor.from_pretrained(self.tmpdirname , **a_ ).tokenizer def a__ ( self , **a_ ) -> int: return AutoProcessor.from_pretrained(self.tmpdirname , **a_ ).image_processor def a__ ( self ) -> List[Any]: shutil.rmtree(self.tmpdirname ) def a__ ( self ) -> List[Any]: lowercase : Union[str, Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] lowercase : Union[str, Any] = [Image.fromarray(np.moveaxis(a_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def a__ ( self ) -> Any: lowercase : List[str] = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowercase : Union[str, Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) lowercase : Tuple = self.get_image_processor(do_normalize=a_ , padding_value=1.0 ) lowercase : Dict = BlipProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=a_ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , a_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , a_ ) def a__ ( self ) -> List[Any]: lowercase : Dict = self.get_image_processor() lowercase : List[str] = self.get_tokenizer() lowercase : Dict = BlipProcessor(tokenizer=a_ , image_processor=a_ ) lowercase : Optional[int] = self.prepare_image_inputs() lowercase : Union[str, Any] = image_processor(a_ , return_tensors="np" ) lowercase : Optional[Any] = processor(images=a_ , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def a__ ( self ) -> List[Any]: lowercase : Dict = self.get_image_processor() lowercase : Optional[Any] = self.get_tokenizer() lowercase : Tuple = BlipProcessor(tokenizer=a_ , image_processor=a_ ) lowercase : Any = """lower newer""" lowercase : int = processor(text=a_ ) lowercase : List[str] = tokenizer(a_ , return_token_type_ids=a_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def a__ ( self ) -> Optional[int]: lowercase : str = self.get_image_processor() lowercase : Dict = self.get_tokenizer() lowercase : Union[str, Any] = BlipProcessor(tokenizer=a_ , image_processor=a_ ) lowercase : Union[str, Any] = """lower newer""" lowercase : str = self.prepare_image_inputs() lowercase : List[Any] = processor(text=a_ , images=a_ ) self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] ) # test if it raises when no input is 
passed with pytest.raises(a_ ): processor() def a__ ( self ) -> Optional[Any]: lowercase : List[Any] = self.get_image_processor() lowercase : Optional[int] = self.get_tokenizer() lowercase : Optional[int] = BlipProcessor(tokenizer=a_ , image_processor=a_ ) lowercase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowercase : Optional[Any] = processor.batch_decode(a_ ) lowercase : int = tokenizer.batch_decode(a_ ) self.assertListEqual(a_ , a_ ) def a__ ( self ) -> Optional[int]: lowercase : str = self.get_image_processor() lowercase : int = self.get_tokenizer() lowercase : Tuple = BlipProcessor(tokenizer=a_ , image_processor=a_ ) lowercase : List[str] = """lower newer""" lowercase : Optional[Any] = self.prepare_image_inputs() lowercase : int = processor(text=a_ , images=a_ ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
372
from arguments import InitializationArguments

from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser


# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
681
0
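The initialisation script above reduces to three steps: load a tokenizer, derive a config sized to its vocabulary, and instantiate a fresh, randomly initialised model from that config. The same three steps with the small public gpt2 config, omitting the attention-stability kwargs for brevity (a sketch, not the script's exact configuration):

from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
config = AutoConfig.from_pretrained("gpt2", vocab_size=len(tokenizer))
model = AutoModelForCausalLM.from_config(config)  # random weights, no checkpoint load
print(f"{model.num_parameters() / 1e6:.1f}M parameters")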
import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed a_ = { 'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), 'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), 'bert': (BertConfig, BertForMaskedLM, BertTokenizer), 'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def lowerCamelCase__ ( _a): assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def lowerCamelCase__ ( _a , _a): if args.student_type == "roberta": SCREAMING_SNAKE_CASE : Union[str, Any] = False elif args.student_type == "gpt2": SCREAMING_SNAKE_CASE : Optional[int] = False def lowerCamelCase__ ( _a , _a): if args.student_type == "roberta": SCREAMING_SNAKE_CASE : str = False def lowerCamelCase__ ( ): SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser(description="Training") parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists.") parser.add_argument( "--dump_path" , type=_a , required=_a , help="The output directory (log, checkpoints, parameters, etc.)") parser.add_argument( "--data_file" , type=_a , required=_a , help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." , ) parser.add_argument( "--student_type" , type=_a , choices=["distilbert", "roberta", "gpt2"] , required=_a , help="The student type (DistilBERT, RoBERTa)." , ) parser.add_argument("--student_config" , type=_a , required=_a , help="Path to the student configuration.") parser.add_argument( "--student_pretrained_weights" , default=_a , type=_a , help="Load student initialization checkpoint.") parser.add_argument( "--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=_a , help="Teacher type (BERT, RoBERTa).") parser.add_argument("--teacher_name" , type=_a , required=_a , help="The teacher model.") parser.add_argument("--temperature" , default=2.0 , type=_a , help="Temperature for the softmax temperature.") parser.add_argument( "--alpha_ce" , default=0.5 , type=_a , help="Linear weight for the distillation loss. Must be >=0.") parser.add_argument( "--alpha_mlm" , default=0.0 , type=_a , help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." 
, ) parser.add_argument("--alpha_clm" , default=0.5 , type=_a , help="Linear weight for the CLM loss. Must be >=0.") parser.add_argument("--alpha_mse" , default=0.0 , type=_a , help="Linear weight of the MSE loss. Must be >=0.") parser.add_argument( "--alpha_cos" , default=0.0 , type=_a , help="Linear weight of the cosine embedding loss. Must be >=0.") parser.add_argument( "--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.") parser.add_argument( "--mlm_mask_prop" , default=0.15 , type=_a , help="Proportion of tokens for which we need to make a prediction." , ) parser.add_argument("--word_mask" , default=0.8 , type=_a , help="Proportion of tokens to mask out.") parser.add_argument("--word_keep" , default=0.1 , type=_a , help="Proportion of tokens to keep.") parser.add_argument("--word_rand" , default=0.1 , type=_a , help="Proportion of tokens to randomly replace.") parser.add_argument( "--mlm_smoothing" , default=0.7 , type=_a , help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." , ) parser.add_argument("--token_counts" , type=_a , help="The token counts in the data_file for MLM.") parser.add_argument( "--restrict_ce_to_mask" , action="store_true" , help="If true, compute the distillation loss only the [MLM] prediction distribution." , ) parser.add_argument( "--freeze_pos_embs" , action="store_true" , help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only." , ) parser.add_argument( "--freeze_token_type_embds" , action="store_true" , help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only." , ) parser.add_argument("--n_epoch" , type=_a , default=3 , help="Number of pass on the whole dataset.") parser.add_argument("--batch_size" , type=_a , default=5 , help="Batch size (for each process).") parser.add_argument( "--group_by_size" , action="store_false" , help="If true, group sequences that have similar length into the same batch. Default is true." , ) parser.add_argument( "--gradient_accumulation_steps" , type=_a , default=50 , help="Gradient accumulation for larger training batches." , ) parser.add_argument("--warmup_prop" , default=0.05 , type=_a , help="Linear warmup proportion.") parser.add_argument("--weight_decay" , default=0.0 , type=_a , help="Weight decay if we apply some.") parser.add_argument("--learning_rate" , default=5E-4 , type=_a , help="The initial learning rate for Adam.") parser.add_argument("--adam_epsilon" , default=1E-6 , type=_a , help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm" , default=5.0 , type=_a , help="Max gradient norm.") parser.add_argument("--initializer_range" , default=0.02 , type=_a , help="Random initialization range.") parser.add_argument( "--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , ) parser.add_argument( "--fp16_opt_level" , type=_a , default="O1" , help=( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." 
"See details at https://nvidia.github.io/apex/amp.html" ) , ) parser.add_argument("--n_gpu" , type=_a , default=1 , help="Number of GPUs in the node.") parser.add_argument("--local_rank" , type=_a , default=-1 , help="Distributed training - Local rank") parser.add_argument("--seed" , type=_a , default=56 , help="Random seed") parser.add_argument("--log_interval" , type=_a , default=500 , help="Tensorboard logging interval.") parser.add_argument("--checkpoint_interval" , type=_a , default=4000 , help="Checkpoint interval.") SCREAMING_SNAKE_CASE : str = parser.parse_args() sanity_checks(_a) # ARGS # init_gpu_params(_a) set_seed(_a) if args.is_master: if os.path.exists(args.dump_path): if not args.force: raise ValueError( f"Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite" " itUse `--force` if you want to overwrite it") else: shutil.rmtree(args.dump_path) if not os.path.exists(args.dump_path): os.makedirs(args.dump_path) logger.info(f"Experiment will be dumped and logged in {args.dump_path}") # SAVE PARAMS # logger.info(f"Param: {args}") with open(os.path.join(args.dump_path , "parameters.json") , "w") as f: json.dump(vars(_a) , _a , indent=4) git_log(args.dump_path) SCREAMING_SNAKE_CASE : Any = MODEL_CLASSES[args.student_type] SCREAMING_SNAKE_CASE : Dict = MODEL_CLASSES[args.teacher_type] # TOKENIZER # SCREAMING_SNAKE_CASE : List[str] = teacher_tokenizer_class.from_pretrained(args.teacher_name) SCREAMING_SNAKE_CASE : Tuple = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): SCREAMING_SNAKE_CASE : List[Any] = tokenizer.all_special_tokens.index(_a) SCREAMING_SNAKE_CASE : str = tokenizer.all_special_ids[idx] logger.info(f"Special tokens {special_tok_ids}") SCREAMING_SNAKE_CASE : List[str] = special_tok_ids SCREAMING_SNAKE_CASE : List[Any] = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(f"Loading data from {args.data_file}") with open(args.data_file , "rb") as fp: SCREAMING_SNAKE_CASE : Union[str, Any] = pickle.load(_a) if args.mlm: logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)") with open(args.token_counts , "rb") as fp: SCREAMING_SNAKE_CASE : str = pickle.load(_a) SCREAMING_SNAKE_CASE : Any = np.maximum(_a , 1) ** -args.mlm_smoothing for idx in special_tok_ids.values(): SCREAMING_SNAKE_CASE : str = 0.0 # do not predict special tokens SCREAMING_SNAKE_CASE : int = torch.from_numpy(_a) else: SCREAMING_SNAKE_CASE : int = None SCREAMING_SNAKE_CASE : Union[str, Any] = LmSeqsDataset(params=_a , data=_a) logger.info("Data loader created.") # STUDENT # logger.info(f"Loading student config from {args.student_config}") SCREAMING_SNAKE_CASE : Optional[Any] = student_config_class.from_pretrained(args.student_config) SCREAMING_SNAKE_CASE : Optional[Any] = True if args.student_pretrained_weights is not None: logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}") SCREAMING_SNAKE_CASE : int = student_model_class.from_pretrained(args.student_pretrained_weights , config=_a) else: SCREAMING_SNAKE_CASE : List[str] = student_model_class(_a) if args.n_gpu > 0: student.to(f"cuda:{args.local_rank}") logger.info("Student loaded.") # TEACHER # SCREAMING_SNAKE_CASE : Optional[Any] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=_a) if args.n_gpu > 0: teacher.to(f"cuda:{args.local_rank}") logger.info(f"Teacher loaded from {args.teacher_name}.") # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(_a , _a) if 
args.freeze_token_type_embds: freeze_token_type_embeddings(_a , _a) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() SCREAMING_SNAKE_CASE : List[Any] = Distiller( params=_a , dataset=_a , token_probs=_a , student=_a , teacher=_a) distiller.train() logger.info("Let's go get some drinks.") if __name__ == "__main__": main()
25
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class A__ ( unittest.TestCase): def UpperCamelCase__ ( self , __magic_name__ ): for model_result in results.values(): for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ): lowerCamelCase : List[str] = model_result["""result"""][batch_size][sequence_length] self.assertIsNotNone(__magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = """sshleifer/tiny-gpt2""" lowerCamelCase : str = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__magic_name__ , multi_process=__magic_name__ , ) lowerCamelCase : Dict = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : Any = """sgugger/tiny-distilbert-classification""" lowerCamelCase : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , only_pretrain_model=__magic_name__ , ) lowerCamelCase : List[Any] = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2""" lowerCamelCase : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) lowerCamelCase : Any = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : List[Any] = """sshleifer/tiny-gpt2""" lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ ) lowerCamelCase : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__magic_name__ , multi_process=__magic_name__ , ) lowerCamelCase : Optional[Any] = TensorFlowBenchmark(__magic_name__ , [config] ) lowerCamelCase : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = """sshleifer/tiny-gpt2""" lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(__magic_name__ ) lowerCamelCase : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) lowerCamelCase : Union[str, Any] = TensorFlowBenchmark(__magic_name__ , [config] ) lowerCamelCase : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) 
self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2""" lowerCamelCase : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) lowerCamelCase : int = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase__ ( self ): lowerCamelCase : int = """sshleifer/tiny-gpt2""" lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ ) lowerCamelCase : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) lowerCamelCase : Any = TensorFlowBenchmark(__magic_name__ , [config] ) lowerCamelCase : str = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase__ ( self ): lowerCamelCase : str = """patrickvonplaten/t5-tiny-random""" lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ ) lowerCamelCase : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) lowerCamelCase : List[Any] = TensorFlowBenchmark(__magic_name__ , configs=[config] ) lowerCamelCase : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , """Cannot do xla on CPU.""" ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[Any] = """sshleifer/tiny-gpt2""" lowerCamelCase : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__magic_name__ , multi_process=__magic_name__ , ) lowerCamelCase : int = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : str = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2""" with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase : List[str] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=__magic_name__ , save_to_csv=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__magic_name__ , """inf_time.csv""" ) , inference_memory_csv_file=os.path.join(__magic_name__ , """inf_mem.csv""" ) , env_info_csv_file=os.path.join(__magic_name__ , """env.csv""" ) , multi_process=__magic_name__ , ) lowerCamelCase : List[str] = TensorFlowBenchmark(__magic_name__ ) benchmark.run() self.assertTrue(Path(os.path.join(__magic_name__ , """inf_time.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(__magic_name__ , """inf_mem.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(__magic_name__ , """env.csv""" ) ).exists() ) def UpperCamelCase__ ( self ): lowerCamelCase : str = """sshleifer/tiny-gpt2""" def _check_summary_is_not_empty(__magic_name__ ): self.assertTrue(hasattr(__magic_name__ , 
"""sequential""" ) ) self.assertTrue(hasattr(__magic_name__ , """cumulative""" ) ) self.assertTrue(hasattr(__magic_name__ , """current""" ) ) self.assertTrue(hasattr(__magic_name__ , """total""" ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__magic_name__ , """log.txt""" ) , log_print=__magic_name__ , trace_memory_line_by_line=__magic_name__ , eager_mode=__magic_name__ , multi_process=__magic_name__ , ) lowerCamelCase : Tuple = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : Union[str, Any] = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(__magic_name__ , """log.txt""" ) ).exists() )
681
0
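The distillation trainer configured above mixes several losses (alpha_ce, alpha_mlm, alpha_clm, alpha_mse, alpha_cos) and a softmax temperature. Under the standard Hinton-style formulation (an assumption here, since the Distiller class itself is not shown), the alpha_ce term is a KL divergence between temperature-softened teacher and student distributions; a minimal runnable sketch:

import torch
import torch.nn.functional as F

def kd_loss(student_logits, teacher_logits, temperature=2.0):
    # KL(teacher || student) on softened distributions, rescaled by T^2
    # so gradient magnitudes stay comparable across temperatures.
    s = F.log_softmax(student_logits / temperature, dim=-1)
    t = F.softmax(teacher_logits / temperature, dim=-1)
    return F.kl_div(s, t, reduction="batchmean") * temperature**2

loss = kd_loss(torch.randn(4, 100), torch.randn(4, 100))
print(float(loss))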
import logging
from dataclasses import dataclass, field
from typing import Optional

from seq2seq_trainer import arg_to_scheduler

from transformers import TrainingArguments


logger = logging.getLogger(__name__)


@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
138
import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def _a ( lowerCamelCase ): return x + 2 class A__ ( unittest.TestCase): def UpperCamelCase__ ( self ): lowerCamelCase : List[Any] = """x = 3""" lowerCamelCase : Tuple = {} lowerCamelCase : List[str] = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result == 3 self.assertDictEqual(__magic_name__ , {"""x""": 3} ) lowerCamelCase : Optional[int] = """x = y""" lowerCamelCase : Tuple = {"""y""": 5} lowerCamelCase : Tuple = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 5, """y""": 5} ) def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = """y = add_two(x)""" lowerCamelCase : List[Any] = {"""x""": 3} lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} ) # Won't work without the tool with CaptureStdout() as out: lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result is None assert "tried to execute add_two" in out.out def UpperCamelCase__ ( self ): lowerCamelCase : int = """x = 3""" lowerCamelCase : Dict = {} lowerCamelCase : Tuple = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result == 3 self.assertDictEqual(__magic_name__ , {"""x""": 3} ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[Any] = """test_dict = {'x': x, 'y': add_two(x)}""" lowerCamelCase : Optional[int] = {"""x""": 3} lowerCamelCase : Tuple = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} ) self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = """x = 3\ny = 5""" lowerCamelCase : Optional[int] = {} lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = """text = f'This is x: {x}.'""" lowerCamelCase : Optional[int] = {"""x""": 3} lowerCamelCase : Optional[int] = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(__magic_name__ , {"""x""": 3, """text""": """This is x: 3."""} ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = """if x <= 3:\n y = 2\nelse:\n y = 5""" lowerCamelCase : Tuple = {"""x""": 3} lowerCamelCase : int = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 2} ) lowerCamelCase : Tuple = {"""x""": 8} lowerCamelCase : Dict = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. 
assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 8, """y""": 5} ) def UpperCamelCase__ ( self ): lowerCamelCase : Dict = """test_list = [x, add_two(x)]""" lowerCamelCase : List[Any] = {"""x""": 3} lowerCamelCase : List[str] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) self.assertListEqual(__magic_name__ , [3, 5] ) self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_list""": [3, 5]} ) def UpperCamelCase__ ( self ): lowerCamelCase : str = """y = x""" lowerCamelCase : List[Any] = {"""x""": 3} lowerCamelCase : Any = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result == 3 self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 3} ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = """test_list = [x, add_two(x)]\ntest_list[1]""" lowerCamelCase : Any = {"""x""": 3} lowerCamelCase : List[str] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_list""": [3, 5]} ) lowerCamelCase : Any = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']""" lowerCamelCase : Dict = {"""x""": 3} lowerCamelCase : Any = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} ) def UpperCamelCase__ ( self ): lowerCamelCase : Union[str, Any] = """x = 0\nfor i in range(3):\n x = i""" lowerCamelCase : int = {} lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {"""range""": range} , state=__magic_name__ ) assert result == 2 self.assertDictEqual(__magic_name__ , {"""x""": 2, """i""": 2} )
681
0
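The tests above pin down the contract of the restricted interpreter: statements run against a whitelist of tools, state persists across lines, and the value of the last assignment is returned. A loose sketch of that contract (the helper name and the use of plain exec are mine; the real transformers interpreter walks the AST and refuses calls to anything outside the tool whitelist):

import ast

def tiny_evaluate(code: str, tools: dict, state: dict):
    """Run `code` statement by statement; return the value of the last assignment."""
    result = None
    for node in ast.parse(code).body:
        exec(compile(ast.Module(body=[node], type_ignores=[]), "<tool-code>", "exec"), dict(tools), state)
        if isinstance(node, ast.Assign):
            result = state[node.targets[0].id]
    return result

state = {}
print(tiny_evaluate("x = 3\ny = add_two(x)", {"add_two": lambda v: v + 2}, state))  # 5
print(state)  # {'x': 3, 'y': 5}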
"""simple docstring""" import PIL.Image import PIL.ImageOps from packaging import version from PIL import Image if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'): A_ : Union[str, Any] = { 'linear': PIL.Image.Resampling.BILINEAR, 'bilinear': PIL.Image.Resampling.BILINEAR, 'bicubic': PIL.Image.Resampling.BICUBIC, 'lanczos': PIL.Image.Resampling.LANCZOS, 'nearest': PIL.Image.Resampling.NEAREST, } else: A_ : Tuple = { 'linear': PIL.Image.LINEAR, 'bilinear': PIL.Image.BILINEAR, 'bicubic': PIL.Image.BICUBIC, 'lanczos': PIL.Image.LANCZOS, 'nearest': PIL.Image.NEAREST, } def __snake_case ( __A : Any ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = (images / 2 + 0.5).clamp(0 , 1 ) SCREAMING_SNAKE_CASE : Optional[Any] = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() SCREAMING_SNAKE_CASE : Any = numpy_to_pil(__A ) return images def __snake_case ( __A : Tuple ) -> Any: '''simple docstring''' if images.ndim == 3: SCREAMING_SNAKE_CASE : Optional[Any] = images[None, ...] SCREAMING_SNAKE_CASE : List[Any] = (images * 255).round().astype('uint8' ) if images.shape[-1] == 1: # special case for grayscale (single channel) images SCREAMING_SNAKE_CASE : Optional[int] = [Image.fromarray(image.squeeze() , mode='L' ) for image in images] else: SCREAMING_SNAKE_CASE : int = [Image.fromarray(__A ) for image in images] return pil_images
265
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
681
0
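The numpy_to_pil-style conversion two rows up follows a fixed convention: float arrays in [0, 1] with layout (batch, height, width, channels) become uint8 PIL images, and single-channel batches fall back to mode "L". A quick self-contained check of that convention (the array shapes are my own toy values):

import numpy as np
from PIL import Image

batch = np.random.rand(2, 8, 8, 3)  # floats in [0, 1], NHWC
imgs = [Image.fromarray((im * 255).round().astype("uint8")) for im in batch]
gray = np.random.rand(1, 8, 8, 1)   # single channel -> grayscale
gray_imgs = [Image.fromarray((im * 255).round().astype("uint8").squeeze(), mode="L") for im in gray]
print(imgs[0].mode, gray_imgs[0].mode)  # RGB L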
"""simple docstring""" import collections import os import re from pathlib import Path _UpperCamelCase = "src/transformers" # Matches is_xxx_available() _UpperCamelCase = re.compile(R"is\_([a-z_]*)_available()") # Catches a one-line _import_struct = {xxx} _UpperCamelCase = re.compile(R"^_import_structure\s+=\s+\{([^\}]+)\}") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] _UpperCamelCase = re.compile(R"\s+\"\S*\":\s+\[([^\]]*)\]") # Catches a line if not is_foo_available _UpperCamelCase = re.compile(R"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)") # Catches a line _import_struct["bla"].append("foo") _UpperCamelCase = re.compile(R"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] _UpperCamelCase = re.compile(R"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]") # Catches a line with an object between quotes and a comma: "MyModel", _UpperCamelCase = re.compile(R"^\s+\"([^\"]+)\",") # Catches a line with objects between brackets only: ["foo", "bar"], _UpperCamelCase = re.compile(R"^\s+\[([^\]]+)\]") # Catches a line with from foo import bar, bla, boo _UpperCamelCase = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n") # Catches a line with try: _UpperCamelCase = re.compile(R"^\s*try:") # Catches a line with else: _UpperCamelCase = re.compile(R"^\s*else:") def _A( lowerCAmelCase ): if _re_test_backend.search(lowerCAmelCase ) is None: return None A__ : List[str] = [b[0] for b in _re_backend.findall(lowerCAmelCase )] backends.sort() return "_and_".join(lowerCAmelCase ) def _A( lowerCAmelCase ): with open(lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: A__ : Tuple = f.readlines() A__ : Optional[int] = 0 while line_index < len(lowerCAmelCase ) and not lines[line_index].startswith("""_import_structure = {""" ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(lowerCAmelCase ): return None # First grab the objects without a specific backend in _import_structure A__ : List[Any] = [] while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None: A__ : Dict = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(lowerCAmelCase ): A__ : List[Any] = _re_one_line_import_struct.search(lowerCAmelCase ).groups()[0] A__ : Any = re.findall(r"""\[([^\]]+)\]""" , lowerCAmelCase ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(""", """ )] ) line_index += 1 continue A__ : Union[str, Any] = _re_import_struct_key_value.search(lowerCAmelCase ) if single_line_import_search is not None: A__ : Dict = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(lowerCAmelCase ) > 0] objects.extend(lowerCAmelCase ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) line_index += 1 A__ : Dict = {"""none""": objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith("""if TYPE_CHECKING""" ): # If the line is an if not is_backend_available, we grab all objects associated. 
A__ : Dict = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: A__ : int = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 A__ : str = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ): A__ : Tuple = lines[line_index] if _re_import_struct_add_one.search(lowerCAmelCase ) is not None: objects.append(_re_import_struct_add_one.search(lowerCAmelCase ).groups()[0] ) elif _re_import_struct_add_many.search(lowerCAmelCase ) is not None: A__ : List[Any] = _re_import_struct_add_many.search(lowerCAmelCase ).groups()[0].split(""", """ ) A__ : str = [obj[1:-1] for obj in imports if len(lowerCAmelCase ) > 0] objects.extend(lowerCAmelCase ) elif _re_between_brackets.search(lowerCAmelCase ) is not None: A__ : Dict = _re_between_brackets.search(lowerCAmelCase ).groups()[0].split(""", """ ) A__ : Dict = [obj[1:-1] for obj in imports if len(lowerCAmelCase ) > 0] objects.extend(lowerCAmelCase ) elif _re_quote_object.search(lowerCAmelCase ) is not None: objects.append(_re_quote_object.search(lowerCAmelCase ).groups()[0] ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) elif line.startswith(""" """ * 12 + """\"""" ): objects.append(line[13:-3] ) line_index += 1 A__ : Optional[int] = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend A__ : Dict = [] while ( line_index < len(lowerCAmelCase ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith("""else""" ) ): A__ : List[Any] = lines[line_index] A__ : List[Any] = _re_import.search(lowerCAmelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 A__ : Tuple = {"""none""": objects} # Let's continue with backend-specific objects while line_index < len(lowerCAmelCase ): # If the line is an if is_backend_available, we grab all objects associated. 
A__ : Optional[int] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: A__ : str = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 A__ : Any = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ): A__ : Tuple = lines[line_index] A__ : Union[str, Any] = _re_import.search(lowerCAmelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 12 ): objects.append(line[12:-2] ) line_index += 1 A__ : Union[str, Any] = objects else: line_index += 1 return import_dict_objects, type_hint_objects def _A( lowerCAmelCase , lowerCAmelCase ): def find_duplicates(lowerCAmelCase ): return [k for k, v in collections.Counter(lowerCAmelCase ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] A__ : Union[str, Any] = [] for key in import_dict_objects.keys(): A__ : List[Any] = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) A__ : Dict = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): A__ : Tuple = """base imports""" if key == """none""" else F'''{key} backend''' errors.append(F'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def _A( ): A__ : Tuple = [] for root, _, files in os.walk(lowerCAmelCase ): if "__init__.py" in files: A__ : List[str] = os.path.join(lowerCAmelCase , """__init__.py""" ) A__ : Dict = parse_init(lowerCAmelCase ) if objects is not None: A__ : Union[str, Any] = analyze_results(*lowerCAmelCase ) if len(lowerCAmelCase ) > 0: A__ : Dict = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append("""\n""".join(lowerCAmelCase ) ) if len(lowerCAmelCase ) > 0: raise ValueError("""\n\n""".join(lowerCAmelCase ) ) def _A( ): A__ : Any = [] for path, directories, files in os.walk(lowerCAmelCase ): for folder in directories: # Ignore private modules if folder.startswith("""_""" ): directories.remove(lowerCAmelCase ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(lowerCAmelCase ) / folder).glob("""*.py""" ) ) ) == 0: continue A__ : Optional[int] = str((Path(lowerCAmelCase ) / folder).relative_to(lowerCAmelCase ) ) A__ : Dict = short_path.replace(os.path.sep , """.""" ) submodules.append(lowerCAmelCase ) for fname in files: if fname == "__init__.py": continue A__ : Optional[Any] = str((Path(lowerCAmelCase ) / fname).relative_to(lowerCAmelCase ) ) A__ : List[str] = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" ) if len(submodule.split(""".""" ) ) == 1: submodules.append(lowerCAmelCase ) return submodules _UpperCamelCase = [ 
"convert_pytorch_checkpoint_to_tf2", "modeling_flax_pytorch_utils", "models.esm.openfold_utils", ] def _A( ): # This is to make sure the transformers module imported is the one in the repo. from transformers.utils import direct_transformers_import A__ : Any = direct_transformers_import(lowerCAmelCase ) A__ : Dict = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentiall re-) add them. with open(os.path.join(lowerCAmelCase , """__init__.py""" ) , """r""" ) as f: A__ : List[Any] = f.read() import_structure_keys.update(set(re.findall(r"""import_structure\[\"([^\"]*)\"\]""" , lowerCAmelCase ) ) ) A__ : Optional[int] = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(lowerCAmelCase ) > 0: A__ : Dict = """\n""".join(F'''- {module}''' for module in module_not_registered ) raise ValueError( """The following submodules are not properly registed in the main init of Transformers:\n""" F'''{list_of_modules}\n''' """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" ) if __name__ == "__main__": check_all_inits() check_submodules()
363
import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig _lowerCamelCase =logging.get_logger(__name__) class A__ : def __init__( self , __magic_name__ , __magic_name__ ): lowerCamelCase : Any = question_encoder lowerCamelCase : Dict = generator lowerCamelCase : Tuple = self.question_encoder def UpperCamelCase__ ( self , __magic_name__ ): if os.path.isfile(__magic_name__ ): raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' ) os.makedirs(__magic_name__ , exist_ok=__magic_name__ ) lowerCamelCase : Any = os.path.join(__magic_name__ , """question_encoder_tokenizer""" ) lowerCamelCase : str = os.path.join(__magic_name__ , """generator_tokenizer""" ) self.question_encoder.save_pretrained(__magic_name__ ) self.generator.save_pretrained(__magic_name__ ) @classmethod def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ): # dynamically import AutoTokenizer from ..auto.tokenization_auto import AutoTokenizer lowerCamelCase : Any = kwargs.pop("""config""" , __magic_name__ ) if config is None: lowerCamelCase : Tuple = RagConfig.from_pretrained(__magic_name__ ) lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained( __magic_name__ , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" ) lowerCamelCase : Any = AutoTokenizer.from_pretrained( __magic_name__ , config=config.generator , subfolder="""generator_tokenizer""" ) return cls(question_encoder=__magic_name__ , generator=__magic_name__ ) def __call__( self , *__magic_name__ , **__magic_name__ ): return self.current_tokenizer(*__magic_name__ , **__magic_name__ ) def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ): return self.generator.batch_decode(*__magic_name__ , **__magic_name__ ) def UpperCamelCase__ ( self , *__magic_name__ , **__magic_name__ ): return self.generator.decode(*__magic_name__ , **__magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : Union[str, Any] = self.question_encoder def UpperCamelCase__ ( self ): lowerCamelCase : str = self.generator def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "longest" , __magic_name__ = None , __magic_name__ = True , **__magic_name__ , ): warnings.warn( """`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """ """regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """ """context manager to prepare your targets. See the documentation of your specific tokenizer for more """ """details""" , __magic_name__ , ) if max_length is None: lowerCamelCase : int = self.current_tokenizer.model_max_length lowerCamelCase : int = self( __magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , max_length=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: lowerCamelCase : int = self.current_tokenizer.model_max_length lowerCamelCase : Dict = self( text_target=__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , padding=__magic_name__ , max_length=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , ) lowerCamelCase : List[Any] = labels["""input_ids"""] return model_inputs
681
0
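Both halves of the init consistency check above reduce to per-backend multiset comparisons: no duplicates within a side, and the same set of names on each side. A self-contained sketch of that core logic (the two toy dictionaries are invented for illustration):

import collections

def find_duplicates(items):
    return [k for k, v in collections.Counter(items).items() if v > 1]

import_side = {"none": ["BertModel", "BertModel"], "torch": ["Trainer"]}
type_side = {"none": ["BertModel"], "torch": ["Trainer"]}
for key in import_side:
    dupes = find_duplicates(import_side[key])
    if dupes:
        print(f"Duplicate _import_structure entries for {key}: {dupes}")
    if sorted(set(import_side[key])) != sorted(set(type_side[key])):
        print(f"Mismatch between the two halves for {key}")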
from __future__ import annotations


def min_path_cost(matrix: list[list[int]]) -> int:
    """
    Return the minimum path cost from the top-left to the bottom-right of
    `matrix`, moving only right or down.  The matrix is modified in place.

    >>> min_path_cost([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    """
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
419
import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def _a ( lowerCamelCase, lowerCamelCase ): lowerCamelCase : List[Any] = F'''{sampling_rate}''' lowerCamelCase : Optional[int] = """1""" lowerCamelCase : Any = """f32le""" lowerCamelCase : Any = [ """ffmpeg""", """-i""", """pipe:0""", """-ac""", ac, """-ar""", ar, """-f""", format_for_conversion, """-hide_banner""", """-loglevel""", """quiet""", """pipe:1""", ] try: with subprocess.Popen(lowerCamelCase, stdin=subprocess.PIPE, stdout=subprocess.PIPE ) as ffmpeg_process: lowerCamelCase : Optional[int] = ffmpeg_process.communicate(lowerCamelCase ) except FileNotFoundError as error: raise ValueError("""ffmpeg was not found but is required to load audio files from filename""" ) from error lowerCamelCase : Union[str, Any] = output_stream[0] lowerCamelCase : Optional[Any] = np.frombuffer(lowerCamelCase, np.floataa ) if audio.shape[0] == 0: raise ValueError("""Malformed soundfile""" ) return audio def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase = "f32le", ): lowerCamelCase : Dict = F'''{sampling_rate}''' lowerCamelCase : List[Any] = """1""" if format_for_conversion == "s16le": lowerCamelCase : Any = 2 elif format_for_conversion == "f32le": lowerCamelCase : Dict = 4 else: raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' ) lowerCamelCase : Dict = platform.system() if system == "Linux": lowerCamelCase : Union[str, Any] = """alsa""" lowerCamelCase : List[Any] = """default""" elif system == "Darwin": lowerCamelCase : List[Any] = """avfoundation""" lowerCamelCase : List[Any] = """:0""" elif system == "Windows": lowerCamelCase : int = """dshow""" lowerCamelCase : Any = """default""" lowerCamelCase : Any = [ """ffmpeg""", """-f""", format_, """-i""", input_, """-ac""", ac, """-ar""", ar, """-f""", format_for_conversion, """-fflags""", """nobuffer""", """-hide_banner""", """-loglevel""", """quiet""", """pipe:1""", ] lowerCamelCase : List[Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample lowerCamelCase : Any = _ffmpeg_stream(lowerCamelCase, lowerCamelCase ) for item in iterator: yield item def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = "f32le", ): if stream_chunk_s is not None: lowerCamelCase : int = stream_chunk_s else: lowerCamelCase : Dict = chunk_length_s lowerCamelCase : Optional[Any] = ffmpeg_microphone(lowerCamelCase, lowerCamelCase, format_for_conversion=lowerCamelCase ) if format_for_conversion == "s16le": lowerCamelCase : Optional[int] = np.intaa lowerCamelCase : Optional[Any] = 2 elif format_for_conversion == "f32le": lowerCamelCase : int = np.floataa lowerCamelCase : Any = 4 else: raise ValueError(F'''Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`''' ) if stride_length_s is None: lowerCamelCase : Any = chunk_length_s / 6 lowerCamelCase : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(lowerCamelCase, (int, float) ): lowerCamelCase : Optional[int] = [stride_length_s, stride_length_s] lowerCamelCase : Any = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample lowerCamelCase : Optional[int] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample lowerCamelCase : List[Any] = datetime.datetime.now() lowerCamelCase : List[Any] = datetime.timedelta(seconds=lowerCamelCase ) for item in chunk_bytes_iter(lowerCamelCase, lowerCamelCase, stride=(stride_left, stride_right), stream=lowerCamelCase ): # Put everything back in numpy scale lowerCamelCase : Dict = np.frombuffer(item["""raw"""], dtype=lowerCamelCase ) lowerCamelCase : List[Any] = ( item["""stride"""][0] // size_of_sample, item["""stride"""][1] // size_of_sample, ) lowerCamelCase : Tuple = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! SKIP continue yield item def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = False ): lowerCamelCase : Optional[int] = B"""""" lowerCamelCase , lowerCamelCase : str = stride if stride_left + stride_right >= chunk_len: raise ValueError( F'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' ) lowerCamelCase : str = 0 for raw in iterator: acc += raw if stream and len(lowerCamelCase ) < chunk_len: lowerCamelCase : Optional[int] = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(lowerCamelCase ) >= chunk_len: # We are flushing the accumulator lowerCamelCase : str = (_stride_left, stride_right) lowerCamelCase : Dict = {"""raw""": acc[:chunk_len], """stride""": stride} if stream: lowerCamelCase : Optional[int] = False yield item lowerCamelCase : str = stride_left lowerCamelCase : Tuple = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(lowerCamelCase ) > stride_left: lowerCamelCase : List[str] = {"""raw""": acc, """stride""": (_stride_left, 0)} if stream: lowerCamelCase : List[Any] = False yield item def _a ( lowerCamelCase, lowerCamelCase ): lowerCamelCase : Optional[int] = 2**24 # 16Mo try: with subprocess.Popen(lowerCamelCase, stdout=subprocess.PIPE, bufsize=lowerCamelCase ) as ffmpeg_process: while True: lowerCamelCase : Any = ffmpeg_process.stdout.read(lowerCamelCase ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError("""ffmpeg was not found but is required to stream audio files from filename""" ) from error
681
0
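The byte-chunking generator above has a simple contract: fixed-size windows that overlap their neighbours by (stride_left, stride_right) bytes, so a downstream ASR model gets context on both sides of every chunk. A standalone sketch of that contract (the function name and toy sizes are mine):

def chunk_bytes(data: bytes, chunk_len: int, stride_left: int, stride_right: int):
    """Yield overlapping fixed-size windows of `data`, plus a trailing remainder."""
    if stride_left + stride_right >= chunk_len:
        raise ValueError("stride must be strictly smaller than chunk_len")
    acc = data
    _stride_left = 0  # the very first chunk has no left context
    while len(acc) >= chunk_len:
        yield {"raw": acc[:chunk_len], "stride": (_stride_left, stride_right)}
        _stride_left = stride_left
        acc = acc[chunk_len - stride_left - stride_right:]
    if len(acc) > stride_left:
        yield {"raw": acc, "stride": (_stride_left, 0)}

for item in chunk_bytes(bytes(range(10)), chunk_len=6, stride_left=2, stride_right=1):
    print(item["stride"], list(item["raw"]))
# (0, 1) [0, 1, 2, 3, 4, 5]
# (2, 1) [3, 4, 5, 6, 7, 8]
# (2, 0) [6, 7, 8, 9]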
from itertools import zip_longest

import requests
from bs4 import BeautifulSoup
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹" + item.find("span", attrs={"class": "a-price a-text-price"}).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        # blank these so a row that fails to parse does not repeat stale values
        product_title = " "
        product_mrp = " "
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
507
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , ) @pytest.mark.usefixtures("""sm_env""") @parameterized_class( [ { """framework""": """pytorch""", """script""": """run_glue_model_parallelism.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, { """framework""": """pytorch""", """script""": """run_glue.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, ]) class A__ ( unittest.TestCase): def UpperCamelCase__ ( self ): if self.framework == "pytorch": subprocess.run( F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="""utf-8""" , check=__magic_name__ , ) assert hasattr(self , """env""" ) def UpperCamelCase__ ( self , __magic_name__ ): # configuration for running training on smdistributed Model Parallel lowerCamelCase : Any = { """enabled""": True, """processes_per_host""": 8, } lowerCamelCase : Any = { """enabled""": True, """parameters""": { """microbatches""": 4, """placement_strategy""": """spread""", """pipeline""": """interleaved""", """optimize""": """speed""", """partitions""": 4, """ddp""": True, }, } lowerCamelCase : Optional[Any] = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options} lowerCamelCase : Dict = """trainer""" if self.script == """run_glue.py""" else """smtrainer""" # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=__magic_name__ , instance_type=self.instance_type , debugger_hook_config=__magic_name__ , hyperparameters={ **self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path, """max_steps""": 5_0_0, } , metric_definitions=self.env.metric_definitions , distribution=__magic_name__ , py_version="""py36""" , ) def UpperCamelCase__ ( self , __magic_name__ ): TrainingJobAnalytics(__magic_name__ ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' ) @parameterized.expand([(1,)] ) def UpperCamelCase__ ( self , __magic_name__ ): # create estimator lowerCamelCase : int = self.create_estimator(__magic_name__ ) # run training estimator.fit() # result dataframe lowerCamelCase : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis lowerCamelCase : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) lowerCamelCase : int = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping lowerCamelCase : int = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 
9_9_9_9_9_9 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(F'''{estimator.latest_training_job.name}.json''' , """w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , __magic_name__ )
681
0
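The discount figure in the scraper above is plain percentage arithmetic over the two rupee strings; a self-contained sketch of that step (the function name and sample prices are mine, not from the scraper):

def discount_percent(mrp: str, price: str) -> float:
    # Strip the currency symbol and thousands separators before converting.
    mrp_value = float(mrp.strip("₹").replace(",", ""))
    price_value = float(price.strip("₹").replace(",", ""))
    return (mrp_value - price_value) / mrp_value * 100


print(discount_percent("₹1,000", "₹750"))  # 25.0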
"""simple docstring""" import os def _lowerCAmelCase(a : str ) -> List[Any]: _SCREAMING_SNAKE_CASE =len(grid[0] ) _SCREAMING_SNAKE_CASE =len(a ) _SCREAMING_SNAKE_CASE =0 _SCREAMING_SNAKE_CASE =0 _SCREAMING_SNAKE_CASE =0 # Check vertically, horizontally, diagonally at the same time (only works # for nxn grid) for i in range(a ): for j in range(n_rows - 3 ): _SCREAMING_SNAKE_CASE =grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i] _SCREAMING_SNAKE_CASE =grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3] # Left-to-right diagonal (\) product if i < n_columns - 3: _SCREAMING_SNAKE_CASE =( grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3] ) # Right-to-left diagonal(/) product if i > 2: _SCREAMING_SNAKE_CASE =( grid[i][j] * grid[i - 1][j + 1] * grid[i - 2][j + 2] * grid[i - 3][j + 3] ) _SCREAMING_SNAKE_CASE =max( a , a , a , a ) if max_product > largest: _SCREAMING_SNAKE_CASE =max_product return largest def _lowerCAmelCase() -> Dict: _SCREAMING_SNAKE_CASE =[] with open(os.path.dirname(a ) + '''/grid.txt''' ) as file: for line in file: grid.append(line.strip('''\n''' ).split(''' ''' ) ) _SCREAMING_SNAKE_CASE =[[int(a ) for i in grid[j]] for j in range(len(a ) )] return largest_product(a ) if __name__ == "__main__": print(solution())
255
from __future__ import annotations


def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(n: int = 1000000) -> int:
    # Sum of all numbers below n that are palindromic in base 10 and base 2
    total = 0
    for i in range(1, n):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
681
0
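The double-base palindrome solution above rests on a single membership test; 585 is the classic worked example, since it reads the same both ways in base 10 and in base 2:

# 585 = 0b1001001001, palindromic in both bases, so solution() counts it.
n = 585
decimal = str(n)
binary = bin(n).split("b")[1]
assert decimal == decimal[::-1]  # '585'
assert binary == "1001001001"
assert binary == binary[::-1]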
"""simple docstring""" from arguments import InitializationArguments from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser # Configuration _SCREAMING_SNAKE_CASE : int = HfArgumentParser(InitializationArguments) _SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args() # Load codeparrot tokenizer trained for Python code tokenization _SCREAMING_SNAKE_CASE : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_name) # Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks _SCREAMING_SNAKE_CASE : Optional[int] = { '''vocab_size''': len(tokenizer), '''scale_attn_by_inverse_layer_idx''': True, '''reorder_and_upcast_attn''': True, } # Load model config (GPT-2 large in this case) _SCREAMING_SNAKE_CASE : Union[str, Any] = AutoConfig.from_pretrained(args.config_name, **config_kwargs) # Initialize new model with config _SCREAMING_SNAKE_CASE : Optional[Any] = AutoModelForCausalLM.from_config(config) # Save model to the hub model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
549
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def _a ( lowerCamelCase, lowerCamelCase=False ): lowerCamelCase : Dict = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ("""module.cls_token""", """vit.embeddings.cls_token"""), ("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""module.pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""module.norm.weight""", """layernorm.weight"""), ("""module.norm.bias""", """layernorm.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" lowerCamelCase : Any = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase=False ): for i in range(config.num_hidden_layers ): if base_model: lowerCamelCase : Optional[Any] = """""" else: lowerCamelCase : Optional[int] = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCamelCase : Dict = state_dict.pop(F'''module.blocks.{i}.attn.qkv.weight''' ) lowerCamelCase : List[str] = state_dict.pop(F'''module.blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict lowerCamelCase : Union[str, Any] = in_proj_weight[ : config.hidden_size, : ] lowerCamelCase : Optional[int] = in_proj_bias[: config.hidden_size] lowerCamelCase : Optional[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCamelCase : List[str] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] 
lowerCamelCase : Union[str, Any] = in_proj_weight[ -config.hidden_size :, : ] lowerCamelCase : Any = in_proj_bias[-config.hidden_size :] def _a ( lowerCamelCase ): lowerCamelCase : Tuple = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(lowerCamelCase, lowerCamelCase ) def _a ( lowerCamelCase ): # projection head is used in the self-supervised pre-training in MSN, # for downstream task it's not needed. lowerCamelCase : Any = [ """module.fc.fc1.weight""", """module.fc.fc1.bias""", """module.fc.bn1.weight""", """module.fc.bn1.bias""", """module.fc.bn1.running_mean""", """module.fc.bn1.running_var""", """module.fc.bn1.num_batches_tracked""", """module.fc.fc2.weight""", """module.fc.fc2.bias""", """module.fc.bn2.weight""", """module.fc.bn2.bias""", """module.fc.bn2.running_mean""", """module.fc.bn2.running_var""", """module.fc.bn2.num_batches_tracked""", """module.fc.fc3.weight""", """module.fc.fc3.bias""", ] for k in ignore_keys: state_dict.pop(lowerCamelCase, lowerCamelCase ) def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ): lowerCamelCase : Dict = dct.pop(lowerCamelCase ) lowerCamelCase : str = val def _a ( lowerCamelCase, lowerCamelCase ): lowerCamelCase : Any = ViTMSNConfig() lowerCamelCase : Tuple = 1000 lowerCamelCase : List[Any] = """datasets/huggingface/label-files""" lowerCamelCase : Optional[Any] = """imagenet-1k-id2label.json""" lowerCamelCase : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase, lowerCamelCase ), """r""" ) ) lowerCamelCase : List[Any] = {int(lowerCamelCase ): v for k, v in idalabel.items()} lowerCamelCase : Optional[int] = idalabel lowerCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: lowerCamelCase : int = 384 lowerCamelCase : Optional[int] = 1536 lowerCamelCase : Tuple = 6 elif "l16" in checkpoint_url: lowerCamelCase : Dict = 1024 lowerCamelCase : List[Any] = 4096 lowerCamelCase : Optional[int] = 24 lowerCamelCase : str = 16 lowerCamelCase : str = 0.1 elif "b4" in checkpoint_url: lowerCamelCase : Union[str, Any] = 4 elif "l7" in checkpoint_url: lowerCamelCase : Tuple = 7 lowerCamelCase : Optional[int] = 1024 lowerCamelCase : List[Any] = 4096 lowerCamelCase : Tuple = 24 lowerCamelCase : Dict = 16 lowerCamelCase : str = 0.1 lowerCamelCase : List[Any] = ViTMSNModel(lowerCamelCase ) lowerCamelCase : Dict = torch.hub.load_state_dict_from_url(lowerCamelCase, map_location="""cpu""" )["""target_encoder"""] lowerCamelCase : Any = ViTImageProcessor(size=config.image_size ) remove_projection_head(lowerCamelCase ) lowerCamelCase : Dict = create_rename_keys(lowerCamelCase, base_model=lowerCamelCase ) for src, dest in rename_keys: rename_key(lowerCamelCase, lowerCamelCase, lowerCamelCase ) read_in_q_k_v(lowerCamelCase, lowerCamelCase, base_model=lowerCamelCase ) model.load_state_dict(lowerCamelCase ) model.eval() lowerCamelCase : Tuple = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase : Dict = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase ).raw ) lowerCamelCase : Union[str, Any] = ViTImageProcessor( size=config.image_size, image_mean=lowerCamelCase, image_std=lowerCamelCase ) lowerCamelCase : Tuple = image_processor(images=lowerCamelCase, return_tensors="""pt""" ) # forward pass torch.manual_seed(2 ) lowerCamelCase : int = model(**lowerCamelCase ) lowerCamelCase : Union[str, Any] = outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # 
https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: lowerCamelCase : Union[str, Any] = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] ) elif "b16" in checkpoint_url: lowerCamelCase : Tuple = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] ) elif "l16" in checkpoint_url: lowerCamelCase : List[str] = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] ) elif "b4" in checkpoint_url: lowerCamelCase : Tuple = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] ) else: lowerCamelCase : List[str] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] ) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3], lowerCamelCase, atol=1e-4 ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCamelCase ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(lowerCamelCase ) if __name__ == "__main__": _lowerCamelCase =argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar""", type=str, help="""URL of the checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) _lowerCamelCase =parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
681
0
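The read_in_q_k_v helper in the checkpoint converter above splits one fused attention projection into query/key/value slices by rows; a toy numpy sketch of the same slicing (the 4-dim size is mine for illustration, real ViT-MSN hidden sizes are 384/768/1024):

import numpy as np

hidden_size = 4  # toy value for illustration
in_proj_weight = np.arange(3 * hidden_size * hidden_size).reshape(3 * hidden_size, hidden_size)

query = in_proj_weight[:hidden_size, :]                 # first block of rows
key = in_proj_weight[hidden_size : 2 * hidden_size, :]  # middle block
value = in_proj_weight[-hidden_size:, :]                # last block

# Stacking the slices back recovers the fused matrix exactly.
assert np.array_equal(np.vstack([query, key, value]), in_proj_weight)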
"""simple docstring""" from collections import deque from .hash_table import HashTable class __snake_case (__SCREAMING_SNAKE_CASE ): def __init__( self: int , *A_: Optional[int] , **A_: int ): super().__init__(*A_ , **A_ ) def __a ( self: int , A_: Optional[int] , A_: Tuple ): __lowerCamelCase = deque([] ) if self.values[key] is None else self.values[key] self.values[key].appendleft(A_ ) __lowerCamelCase = self.values[key] def __a ( self: Dict ): return ( sum(self.charge_factor - len(A_ ) for slot in self.values ) / self.size_table * self.charge_factor ) def __a ( self: Optional[Any] , A_: int , A_: Any=None ): if not ( len(self.values[key] ) == self.charge_factor and self.values.count(A_ ) == 0 ): return key return super()._collision_resolution(A_ , A_ )
281
def is_palindrome(num: int) -> bool:
    # Reverse the digits arithmetically and compare with the original.
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
681
0
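The hash table above resolves collisions by chaining values in a per-slot deque with the newest entry at the head; the underlying move is just appendleft:

from collections import deque

slot = deque()
for value in (1, 2, 3):  # three values landing in the same slot
    slot.appendleft(value)
print(slot)  # deque([3, 2, 1]), most recent insertion first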
"""simple docstring""" import importlib import shutil import threading import warnings from typing import List import fsspec import fsspec.asyn from . import compression from .hffilesystem import HfFileSystem lowercase__ : List[str] = importlib.util.find_spec('''s3fs''') is not None if _has_safs: from .safilesystem import SaFileSystem # noqa: F401 lowercase__ : Union[str, Any] = [ compression.BzaFileSystem, compression.GzipFileSystem, compression.LzaFileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(f'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.') fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) def __lowercase ( _a ): if "://" in dataset_path: snake_case_ : Optional[int] = dataset_path.split('''://''' )[1] return dataset_path def __lowercase ( _a ): if fs is not None and fs.protocol != "file": return True else: return False def __lowercase ( _a , _a , _a ): snake_case_ : Any = not is_remote_filesystem(_a ) if is_local: # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(_a ) , fs._strip_protocol(_a ) ) else: fs.mv(_a , _a , recursive=_a ) def __lowercase ( ): if hasattr(fsspec.asyn , '''reset_lock''' ): # for future fsspec>2022.05.0 fsspec.asyn.reset_lock() else: snake_case_ : Dict = None snake_case_ : Any = None snake_case_ : Tuple = threading.Lock()
123
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
681
0
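Outside transformers, the _LazyModule pattern used above can be approximated with a module-level __getattr__ (PEP 562); a hedged sketch of the idea, not transformers' actual implementation:

# hypothetical lazy_pkg/__init__.py
import importlib

_import_structure = {"heavy_module": ["HeavyClass"]}


def __getattr__(name):
    # Import the submodule only when one of its exports is first accessed.
    for module_name, exported in _import_structure.items():
        if name in exported:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")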
'''simple docstring''' import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase : Dict = {"""tokenizer_file""": """tokenizer.json"""} lowerCAmelCase : Any = { """tokenizer_file""": { """bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""", """bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""", """bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""", """bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""", """bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""", """bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""", """bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""", }, } class _UpperCamelCase ( __SCREAMING_SNAKE_CASE): '''simple docstring''' _snake_case = VOCAB_FILES_NAMES _snake_case = PRETRAINED_VOCAB_FILES_MAP _snake_case = ["""input_ids""", """attention_mask"""] _snake_case = None def __init__( self , a_=None , a_=None , a_=None , a_="<unk>" , a_="<s>" , a_="</s>" , a_="<pad>" , a_=False , a_=False , **a_ , ) -> Union[str, Any]: super().__init__( a_ , a_ , tokenizer_file=a_ , unk_token=a_ , bos_token=a_ , eos_token=a_ , pad_token=a_ , add_prefix_space=a_ , clean_up_tokenization_spaces=a_ , **a_ , ) lowercase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , a_ ) != add_prefix_space: lowercase : Any = getattr(a_ , pre_tok_state.pop("type" ) ) lowercase : Union[str, Any] = add_prefix_space lowercase : int = pre_tok_class(**a_ ) lowercase : Tuple = add_prefix_space def a__ ( self , *a_ , **a_ ) -> Union[str, Any]: lowercase : str = kwargs.get("is_split_into_words" , a_ ) if not (self.add_prefix_space or not is_split_into_words): raise Exception( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with''' " pretokenized inputs." ) return super()._batch_encode_plus(*a_ , **a_ ) def a__ ( self , *a_ , **a_ ) -> str: lowercase : Tuple = kwargs.get("is_split_into_words" , a_ ) if not (self.add_prefix_space or not is_split_into_words): raise Exception( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with''' " pretokenized inputs." ) return super()._encode_plus(*a_ , **a_ ) def a__ ( self , a_ , a_ = None ) -> List[str]: lowercase : Optional[Any] = self._tokenizer.model.save(a_ , name=a_ ) return tuple(a_ ) def a__ ( self , a_ ) -> Tuple: lowercase : Optional[Any] = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(a_ , add_special_tokens=a_ ) + [self.eos_token_id] ) if len(a_ ) > self.model_max_length: lowercase : str = input_ids[-self.model_max_length :] return input_ids
372
import copy import random from transformers import CLIPTokenizer class A__ ( __SCREAMING_SNAKE_CASE): def __init__( self , *__magic_name__ , **__magic_name__ ): super().__init__(*__magic_name__ , **__magic_name__ ) lowerCamelCase : Dict = {} def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , **__magic_name__ ): lowerCamelCase : Any = super().add_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ ) if num_added_tokens == 0: raise ValueError( F'''The tokenizer already contains the token {placeholder_token}. Please pass a different''' """ `placeholder_token` that is not already in the tokenizer.""" ) def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , __magic_name__=1 , **__magic_name__ ): lowerCamelCase : List[Any] = [] if num_vec_per_token == 1: self.try_adding_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ ) output.append(__magic_name__ ) else: lowerCamelCase : Dict = [] for i in range(__magic_name__ ): lowerCamelCase : Optional[Any] = placeholder_token + F'''_{i}''' self.try_adding_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ ) output.append(__magic_name__ ) # handle cases where there is a new placeholder token that contains the current placeholder token but is larger for token in self.token_map: if token in placeholder_token: raise ValueError( F'''The tokenizer already has placeholder token {token} that can get confused with''' F''' {placeholder_token}keep placeholder tokens independent''' ) lowerCamelCase : Any = output def UpperCamelCase__ ( self , __magic_name__ , __magic_name__=False , __magic_name__=1.0 ): if isinstance(__magic_name__ , __magic_name__ ): lowerCamelCase : List[str] = [] for i in range(len(__magic_name__ ) ): output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=__magic_name__ ) ) return output for placeholder_token in self.token_map: if placeholder_token in text: lowerCamelCase : List[str] = self.token_map[placeholder_token] lowerCamelCase : Optional[Any] = tokens[: 1 + int(len(__magic_name__ ) * prop_tokens_to_load )] if vector_shuffle: lowerCamelCase : Union[str, Any] = copy.copy(__magic_name__ ) random.shuffle(__magic_name__ ) lowerCamelCase : str = text.replace(__magic_name__ , """ """.join(__magic_name__ ) ) return text def __call__( self , __magic_name__ , *__magic_name__ , __magic_name__=False , __magic_name__=1.0 , **__magic_name__ ): return super().__call__( self.replace_placeholder_tokens_in_text( __magic_name__ , vector_shuffle=__magic_name__ , prop_tokens_to_load=__magic_name__ ) , *__magic_name__ , **__magic_name__ , ) def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , __magic_name__=False , __magic_name__=1.0 , **__magic_name__ ): return super().encode( self.replace_placeholder_tokens_in_text( __magic_name__ , vector_shuffle=__magic_name__ , prop_tokens_to_load=__magic_name__ ) , *__magic_name__ , **__magic_name__ , )
681
0
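The multi-token tokenizer above expands a single placeholder such as <cat-toy> into several learned tokens before encoding; the string-rewriting half of that is simply (the helper name is mine):

def expand_placeholder(text: str, placeholder: str, num_vec_per_token: int) -> str:
    # Mirrors the replace step in replace_placeholder_tokens_in_text above.
    tokens = [f"{placeholder}_{i}" for i in range(num_vec_per_token)]
    return text.replace(placeholder, " ".join(tokens))


print(expand_placeholder("a photo of <cat-toy>", "<cat-toy>", 3))
# a photo of <cat-toy>_0 <cat-toy>_1 <cat-toy>_2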
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class _UpperCamelCase : '''simple docstring''' def __init__( self : Tuple , a : List[str] , a : str=2 , a : str=True , a : Any=False , a : List[Any]=10 , a : Any=3 , a : List[Any]=32 * 8 , a : str=32 * 8 , a : Optional[int]=4 , a : Tuple=64 , ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = parent SCREAMING_SNAKE_CASE : List[str] = batch_size SCREAMING_SNAKE_CASE : Union[str, Any] = is_training SCREAMING_SNAKE_CASE : str = use_auxiliary_loss SCREAMING_SNAKE_CASE : Union[str, Any] = num_queries SCREAMING_SNAKE_CASE : Tuple = num_channels SCREAMING_SNAKE_CASE : List[str] = min_size SCREAMING_SNAKE_CASE : Union[str, Any] = max_size SCREAMING_SNAKE_CASE : str = num_labels SCREAMING_SNAKE_CASE : List[str] = hidden_dim SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dim def __UpperCamelCase ( self : Optional[Any] ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( a ) SCREAMING_SNAKE_CASE : Optional[int] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=a ) SCREAMING_SNAKE_CASE : int = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=a ) > 0.5 ).float() SCREAMING_SNAKE_CASE : Union[str, Any] = (torch.rand((self.batch_size, self.num_labels) , device=a ) > 0.5).long() SCREAMING_SNAKE_CASE : int = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def __UpperCamelCase ( self : Tuple ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE : int = MaskaFormerConfig( hidden_size=self.hidden_dim , ) SCREAMING_SNAKE_CASE : Tuple = self.num_queries SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels SCREAMING_SNAKE_CASE : Union[str, Any] = [1, 1, 1, 1] SCREAMING_SNAKE_CASE : Any = self.num_channels SCREAMING_SNAKE_CASE : Optional[Any] = 64 SCREAMING_SNAKE_CASE : Dict = 128 SCREAMING_SNAKE_CASE : Tuple = self.hidden_dim SCREAMING_SNAKE_CASE : Dict = self.hidden_dim SCREAMING_SNAKE_CASE : Dict = self.hidden_dim return config def __UpperCamelCase ( self : Tuple ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE : List[Any] = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask} return config, inputs_dict def __UpperCamelCase ( self : int , a : Optional[int] , a : List[str] ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : str = output.encoder_hidden_states SCREAMING_SNAKE_CASE : str = output.pixel_decoder_hidden_states SCREAMING_SNAKE_CASE : Union[str, Any] = output.transformer_decoder_hidden_states self.parent.assertTrue(len(a ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(a ) , len(config.backbone_config.depths ) ) 
self.parent.assertTrue(len(a ) , config.decoder_layers ) def __UpperCamelCase ( self : Optional[int] , a : Optional[Any] , a : List[str] , a : List[Any] , a : Tuple=False ) -> Any: """simple docstring""" with torch.no_grad(): SCREAMING_SNAKE_CASE : Dict = MaskaFormerModel(config=a ) model.to(a ) model.eval() SCREAMING_SNAKE_CASE : int = model(pixel_values=a , pixel_mask=a ) SCREAMING_SNAKE_CASE : Union[str, Any] = model(a , output_hidden_states=a ) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(a , a ) def __UpperCamelCase ( self : Union[str, Any] , a : str , a : Optional[int] , a : str , a : Any , a : str ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = MaskaFormerForUniversalSegmentation(config=a ) model.to(a ) model.eval() def comm_check_on_output(a : Union[str, Any] ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): SCREAMING_SNAKE_CASE : Union[str, Any] = model(pixel_values=a , pixel_mask=a ) SCREAMING_SNAKE_CASE : List[Any] = model(a ) comm_check_on_output(a ) SCREAMING_SNAKE_CASE : int = model( pixel_values=a , pixel_mask=a , mask_labels=a , class_labels=a ) comm_check_on_output(a ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class _UpperCamelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =(MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () lowerCamelCase__ ={"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {} lowerCamelCase__ =False lowerCamelCase__ =False lowerCamelCase__ =False lowerCamelCase__ =False def __UpperCamelCase ( self : Dict ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = MaskaFormerModelTester(self ) SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=a , has_text_modality=a ) def __UpperCamelCase ( self : Optional[int] ) -> List[Any]: """simple docstring""" self.config_tester.run_common_tests() def __UpperCamelCase ( self : Tuple ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(a , **a , output_hidden_states=a ) def __UpperCamelCase ( self : str ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*a ) @unittest.skip(reason="Mask2Former does 
not use inputs_embeds" ) def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" pass @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" ) def __UpperCamelCase ( self : Dict ) -> Union[str, Any]: """simple docstring""" pass @unittest.skip(reason="Mask2Former is not a generative model" ) def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]: """simple docstring""" pass @unittest.skip(reason="Mask2Former does not use token embeddings" ) def __UpperCamelCase ( self : Any ) -> int: """simple docstring""" pass @require_torch_multi_gpu @unittest.skip( reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def __UpperCamelCase ( self : List[str] ) -> Tuple: """simple docstring""" pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def __UpperCamelCase ( self : List[str] ) -> Tuple: """simple docstring""" pass def __UpperCamelCase ( self : str ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Dict = model_class(a ) SCREAMING_SNAKE_CASE : Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE : Tuple = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE : str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , a ) @slow def __UpperCamelCase ( self : List[Any] ) -> Any: """simple docstring""" for model_name in ["facebook/mask2former-swin-small-coco-instance"]: SCREAMING_SNAKE_CASE : int = MaskaFormerModel.from_pretrained(a ) self.assertIsNotNone(a ) def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = (self.model_tester.min_size,) * 2 SCREAMING_SNAKE_CASE : int = { """pixel_values""": torch.randn((2, 3, *size) , device=a ), """mask_labels""": torch.randn((2, 10, *size) , device=a ), """class_labels""": torch.zeros(2 , 10 , device=a ).long(), } SCREAMING_SNAKE_CASE : int = self.model_tester.get_config() SCREAMING_SNAKE_CASE : Optional[int] = MaskaFormerForUniversalSegmentation(a ).to(a ) SCREAMING_SNAKE_CASE : Union[str, Any] = model(**a ) self.assertTrue(outputs.loss is not None ) def __UpperCamelCase ( self : Union[str, Any] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(a , **a , output_hidden_states=a ) def __UpperCamelCase ( self : Optional[Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : str = model_class(a ).to(a ) SCREAMING_SNAKE_CASE : Any = model(**a , output_attentions=a ) self.assertTrue(outputs.attentions is not None ) def __UpperCamelCase ( self : Dict ) -> int: """simple docstring""" if not self.model_tester.is_training: return SCREAMING_SNAKE_CASE : List[Any] = self.all_model_classes[1] SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE : Optional[int] = model_class(a ) model.to(a ) model.train() SCREAMING_SNAKE_CASE : Dict = model(a , mask_labels=a , class_labels=a ).loss loss.backward() def __UpperCamelCase ( self : Optional[Any] ) -> Tuple: """simple docstring""" 
SCREAMING_SNAKE_CASE : List[str] = self.all_model_classes[1] SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE : Optional[int] = True SCREAMING_SNAKE_CASE : List[str] = True SCREAMING_SNAKE_CASE : int = model_class(a ).to(a ) model.train() SCREAMING_SNAKE_CASE : str = model(a , mask_labels=a , class_labels=a ) SCREAMING_SNAKE_CASE : Tuple = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() SCREAMING_SNAKE_CASE : int = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() SCREAMING_SNAKE_CASE : Optional[Any] = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() SCREAMING_SNAKE_CASE : Any = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=a ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) a_ = 1E-4 def lowerCamelCase__ ( ): SCREAMING_SNAKE_CASE : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_vision @slow class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def __UpperCamelCase ( self : List[Any] ) -> Tuple: """simple docstring""" return "facebook/mask2former-swin-small-coco-instance" @cached_property def __UpperCamelCase ( self : Optional[int] ) -> Dict: """simple docstring""" return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None def __UpperCamelCase ( self : Optional[Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : Dict = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(a ) SCREAMING_SNAKE_CASE : Optional[int] = self.default_image_processor SCREAMING_SNAKE_CASE : Tuple = prepare_img() SCREAMING_SNAKE_CASE : int = image_processor(a , return_tensors="pt" ).to(a ) SCREAMING_SNAKE_CASE : str = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(a , (1, 3, 384, 384) ) with torch.no_grad(): SCREAMING_SNAKE_CASE : List[Any] = model(**a ) SCREAMING_SNAKE_CASE : Tuple = torch.tensor( [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(a ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , a , atol=a ) ) SCREAMING_SNAKE_CASE : Any = torch.tensor( [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(a ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , a , atol=a ) ) SCREAMING_SNAKE_CASE : int = torch.tensor( [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(a ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , a , atol=a ) ) def __UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(a ).eval() SCREAMING_SNAKE_CASE : Any = self.default_image_processor SCREAMING_SNAKE_CASE : str = prepare_img() SCREAMING_SNAKE_CASE : Any = image_processor(a , return_tensors="pt" ).to(a ) SCREAMING_SNAKE_CASE : Union[str, Any] = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and 
(inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(a , (1, 3, 384, 384) ) with torch.no_grad(): SCREAMING_SNAKE_CASE : Optional[int] = model(**a ) # masks_queries_logits SCREAMING_SNAKE_CASE : List[Any] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) SCREAMING_SNAKE_CASE : Union[str, Any] = [ [-8.7839, -9.0056, -8.8121], [-7.4104, -7.0313, -6.5401], [-6.6105, -6.3427, -6.4675], ] SCREAMING_SNAKE_CASE : int = torch.tensor(a ).to(a ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , a , atol=a ) ) # class_queries_logits SCREAMING_SNAKE_CASE : int = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) ) SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor( [ [1.8324, -8.0835, -4.1922], [0.8450, -9.0050, -3.6053], [0.3045, -7.7293, -3.0275], ] ).to(a ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , a , atol=a ) ) def __UpperCamelCase ( self : Tuple ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Any = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(a ).eval() SCREAMING_SNAKE_CASE : int = self.default_image_processor SCREAMING_SNAKE_CASE : int = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , ) SCREAMING_SNAKE_CASE : Tuple = inputs["""pixel_values"""].to(a ) SCREAMING_SNAKE_CASE : str = [el.to(a ) for el in inputs["""mask_labels"""]] SCREAMING_SNAKE_CASE : List[Any] = [el.to(a ) for el in inputs["""class_labels"""]] with torch.no_grad(): SCREAMING_SNAKE_CASE : List[Any] = model(**a ) self.assertTrue(outputs.loss is not None )
25
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class A__ ( unittest.TestCase): def __init__( self , __magic_name__ , __magic_name__=7 , __magic_name__=3 , __magic_name__=1_8 , __magic_name__=3_0 , __magic_name__=4_0_0 , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=[0.48_145_466, 0.4_578_275, 0.40_821_073] , __magic_name__=[0.26_862_954, 0.26_130_258, 0.27_577_711] , __magic_name__=True , ): lowerCamelCase : Union[str, Any] = size if size is not None else {"""height""": 2_2_4, """width""": 2_2_4} lowerCamelCase : str = crop_size if crop_size is not None else {"""height""": 1_8, """width""": 1_8} lowerCamelCase : Optional[int] = parent lowerCamelCase : Union[str, Any] = batch_size lowerCamelCase : str = num_channels lowerCamelCase : Any = image_size lowerCamelCase : Optional[int] = min_resolution lowerCamelCase : Union[str, Any] = max_resolution lowerCamelCase : Union[str, Any] = do_resize lowerCamelCase : int = size lowerCamelCase : int = do_center_crop lowerCamelCase : Union[str, Any] = crop_size lowerCamelCase : Union[str, Any] = do_normalize lowerCamelCase : Dict = image_mean lowerCamelCase : Optional[Any] = image_std lowerCamelCase : Union[str, Any] = do_convert_rgb def UpperCamelCase__ ( self ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def UpperCamelCase__ ( self , __magic_name__=False , __magic_name__=False , __magic_name__=False ): assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: lowerCamelCase : Tuple = [] for i in range(self.batch_size ): image_inputs.append( np.random.randint( 2_5_5 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) ) else: lowerCamelCase : Dict = [] for i in range(self.batch_size ): lowerCamelCase , lowerCamelCase : int = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 ) image_inputs.append(np.random.randint(2_5_5 , size=(self.num_channels, width, height) , dtype=np.uinta ) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension lowerCamelCase : int = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs] if torchify: lowerCamelCase : int = [torch.from_numpy(__magic_name__ ) for x in image_inputs] return image_inputs @require_torch @require_vision class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase): _UpperCAmelCase : Any = ChineseCLIPImageProcessor if is_vision_available() else None def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = ChineseCLIPImageProcessingTester(self , do_center_crop=__magic_name__ ) @property def UpperCamelCase__ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__magic_name__ , """do_resize""" ) ) self.assertTrue(hasattr(__magic_name__ , 
"""size""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_center_crop""" ) ) self.assertTrue(hasattr(__magic_name__ , """center_crop""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_normalize""" ) ) self.assertTrue(hasattr(__magic_name__ , """image_mean""" ) ) self.assertTrue(hasattr(__magic_name__ , """image_std""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_convert_rgb""" ) ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 2_2_4, """width""": 2_2_4} ) self.assertEqual(image_processor.crop_size , {"""height""": 1_8, """width""": 1_8} ) lowerCamelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2} ) self.assertEqual(image_processor.crop_size , {"""height""": 8_4, """width""": 8_4} ) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): # Initialize image_processing lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , Image.Image ) # Test not batched input lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCamelCase : Optional[Any] = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def UpperCamelCase__ ( self ): # Initialize image_processing lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ , numpify=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , np.ndarray ) # Test not batched input lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCamelCase : Tuple = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def UpperCamelCase__ ( self ): # Initialize image_processing lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase : Any = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ , torchify=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , torch.Tensor ) # Test not batched input lowerCamelCase : Optional[int] = 
image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCamelCase : str = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) @require_torch @require_vision class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase): _UpperCAmelCase : Tuple = ChineseCLIPImageProcessor if is_vision_available() else None def UpperCamelCase__ ( self ): lowerCamelCase : Union[str, Any] = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=__magic_name__ ) lowerCamelCase : Any = 3 @property def UpperCamelCase__ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase__ ( self ): lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__magic_name__ , """do_resize""" ) ) self.assertTrue(hasattr(__magic_name__ , """size""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_center_crop""" ) ) self.assertTrue(hasattr(__magic_name__ , """center_crop""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_normalize""" ) ) self.assertTrue(hasattr(__magic_name__ , """image_mean""" ) ) self.assertTrue(hasattr(__magic_name__ , """image_std""" ) ) self.assertTrue(hasattr(__magic_name__ , """do_convert_rgb""" ) ) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): # Initialize image_processing lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__magic_name__ ) for image in image_inputs: self.assertIsInstance(__magic_name__ , Image.Image ) # Test not batched input lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched lowerCamelCase : Optional[Any] = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
681
0
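The Mask2Former tests above repeatedly assert two size relations: processor outputs are padded to multiples of 32, and mask logits come out at a quarter of the input resolution. The arithmetic being asserted, in isolation (the values are illustrative):

height, width = 384, 384   # processor output size from the tests above
num_queries = 100          # illustrative; the tests read this from the config

assert height % 32 == 0 and width % 32 == 0
masks_queries_logits_shape = (1, num_queries, height // 4, width // 4)
print(masks_queries_logits_shape)  # (1, 100, 96, 96)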
'''simple docstring'''


def solution(n: int = 1000) -> int:
    """Return the sum of all the multiples of 3 or 5 below n."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
138
from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A__ : def __init__( self , __magic_name__ , __magic_name__=3 , __magic_name__=3_2 , __magic_name__=3 , __magic_name__=1_0 , __magic_name__=[1_0, 2_0, 3_0, 4_0] , __magic_name__=[1, 1, 2, 1] , __magic_name__=True , __magic_name__=True , __magic_name__="relu" , __magic_name__=3 , __magic_name__=None , ): lowerCamelCase : Tuple = parent lowerCamelCase : Tuple = batch_size lowerCamelCase : List[Any] = image_size lowerCamelCase : Optional[Any] = num_channels lowerCamelCase : Dict = embeddings_size lowerCamelCase : Optional[int] = hidden_sizes lowerCamelCase : Union[str, Any] = depths lowerCamelCase : Optional[Any] = is_training lowerCamelCase : Union[str, Any] = use_labels lowerCamelCase : Dict = hidden_act lowerCamelCase : Any = num_labels lowerCamelCase : int = scope lowerCamelCase : Optional[Any] = len(__magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase : Tuple = None if self.use_labels: lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels ) lowerCamelCase : Tuple = self.get_config() return config, pixel_values, labels def UpperCamelCase__ ( self ): return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : Dict = TFResNetModel(config=__magic_name__ ) lowerCamelCase : Tuple = model(__magic_name__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : str = self.num_labels lowerCamelCase : Dict = TFResNetForImageClassification(__magic_name__ ) lowerCamelCase : Union[str, Any] = model(__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = config_and_inputs lowerCamelCase : List[str] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase): _UpperCAmelCase : Any = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () _UpperCAmelCase : List[str] = ( {"""feature-extraction""": TFResNetModel, 
"""image-classification""": TFResNetForImageClassification} if is_tf_available() else {} ) _UpperCAmelCase : Optional[Any] = False _UpperCAmelCase : Optional[Any] = False _UpperCAmelCase : Dict = False _UpperCAmelCase : List[Any] = False _UpperCAmelCase : Any = False def UpperCamelCase__ ( self ): lowerCamelCase : int = TFResNetModelTester(self ) lowerCamelCase : str = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ ) def UpperCamelCase__ ( self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase__ ( self ): return @unittest.skip(reason="""ResNet does not use inputs_embeds""" ) def UpperCamelCase__ ( self ): pass @unittest.skip(reason="""ResNet does not support input and output embeddings""" ) def UpperCamelCase__ ( self ): pass def UpperCamelCase__ ( self ): lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : List[str] = model_class(__magic_name__ ) lowerCamelCase : str = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase : Tuple = [*signature.parameters.keys()] lowerCamelCase : List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCamelCase__ ( self ): def check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase : Any = model_class(__magic_name__ ) lowerCamelCase : List[Any] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) lowerCamelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCamelCase : Union[str, Any] = self.model_tester.num_stages self.assertEqual(len(__magic_name__ ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : Tuple = ["""basic""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: lowerCamelCase : Union[str, Any] = layer_type lowerCamelCase : str = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase : int = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__magic_name__ ) @slow def UpperCamelCase__ ( self ): for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : Any = TFResNetModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def _a ( ): lowerCamelCase : 
Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class A__ ( unittest.TestCase): @cached_property def UpperCamelCase__ ( self ): return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) lowerCamelCase : List[str] = self.default_image_processor lowerCamelCase : str = prepare_img() lowerCamelCase : Tuple = image_processor(images=__magic_name__ , return_tensors="""tf""" ) # forward pass lowerCamelCase : Tuple = model(**__magic_name__ ) # verify the logits lowerCamelCase : Optional[Any] = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , __magic_name__ ) lowerCamelCase : Optional[Any] = tf.constant([-11.1_069, -9.7_877, -8.3_777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __magic_name__ , atol=1e-4 ) )
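# --- Added usage sketch (not part of the test above) ---
# A minimal, hedged example of the inference API the integration test exercises.
# The checkpoint id "microsoft/resnet-50" is an assumption; the test itself
# resolves its checkpoint from TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST.
import tensorflow as tf
from PIL import Image
from transformers import AutoImageProcessor, TFResNetForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits  # shape (1, 1000) for the ImageNet-1k head
predicted_id = int(tf.math.argmax(logits, axis=-1)[0])
print(model.config.id2label[predicted_id])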
"""simple docstring""" import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class lowerCAmelCase__ ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' _SCREAMING_SNAKE_CASE : str = (DPMSolverSinglestepScheduler,) _SCREAMING_SNAKE_CASE : str = (("""num_inference_steps""", 25),) def _lowerCAmelCase ( self : int , **_SCREAMING_SNAKE_CASE : int ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE : Any = { """num_train_timesteps""": 1_000, """beta_start""": 0.0_0_0_1, """beta_end""": 0.0_2, """beta_schedule""": """linear""", """solver_order""": 2, """prediction_type""": """epsilon""", """thresholding""": False, """sample_max_value""": 1.0, """algorithm_type""": """dpmsolver++""", """solver_type""": """midpoint""", """lambda_min_clipped""": -float('inf' ), """variance_type""": None, } config.update(**_SCREAMING_SNAKE_CASE ) return config def _lowerCAmelCase ( self : Tuple , _SCREAMING_SNAKE_CASE : List[str]=0 , **_SCREAMING_SNAKE_CASE : List[str] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = dict(self.forward_default_kwargs ) SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs.pop('num_inference_steps' , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : int = self.dummy_sample SCREAMING_SNAKE_CASE : int = 0.1 * sample SCREAMING_SNAKE_CASE : Optional[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config(**_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : str = scheduler_class(**_SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(_SCREAMING_SNAKE_CASE ) # copy over dummy past residuals SCREAMING_SNAKE_CASE : str = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : str = scheduler_class.from_pretrained(_SCREAMING_SNAKE_CASE ) new_scheduler.set_timesteps(_SCREAMING_SNAKE_CASE ) # copy over dummy past residuals SCREAMING_SNAKE_CASE : Any = dummy_past_residuals[: new_scheduler.config.solver_order] SCREAMING_SNAKE_CASE : List[str] = sample, sample for t in range(_SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ): SCREAMING_SNAKE_CASE : Tuple = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample SCREAMING_SNAKE_CASE : Dict = new_scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def _lowerCAmelCase ( self : Union[str, Any] ) -> str: """simple docstring""" pass def _lowerCAmelCase ( self : List[str] , _SCREAMING_SNAKE_CASE : str=0 , **_SCREAMING_SNAKE_CASE : List[str] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = dict(self.forward_default_kwargs ) SCREAMING_SNAKE_CASE : str = kwargs.pop('num_inference_steps' , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : int = self.dummy_sample SCREAMING_SNAKE_CASE : List[str] = 0.1 * sample SCREAMING_SNAKE_CASE : List[str] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**_SCREAMING_SNAKE_CASE ) 
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE ) # copy over dummy past residuals (must be after setting timesteps) SCREAMING_SNAKE_CASE : str = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : Any = scheduler_class.from_pretrained(_SCREAMING_SNAKE_CASE ) # copy over dummy past residuals new_scheduler.set_timesteps(_SCREAMING_SNAKE_CASE ) # copy over dummy past residual (must be after setting timesteps) SCREAMING_SNAKE_CASE : Any = dummy_past_residuals[: new_scheduler.config.solver_order] SCREAMING_SNAKE_CASE : Dict = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample SCREAMING_SNAKE_CASE : Optional[Any] = new_scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def _lowerCAmelCase ( self : str , _SCREAMING_SNAKE_CASE : List[str]=None , **_SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[str]: """simple docstring""" if scheduler is None: SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : List[Any] = self.get_scheduler_config(**_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : Dict = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : str = self.get_scheduler_config(**_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class(**_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : Union[str, Any] = 10 SCREAMING_SNAKE_CASE : List[str] = self.dummy_model() SCREAMING_SNAKE_CASE : Any = self.dummy_sample_deter scheduler.set_timesteps(_SCREAMING_SNAKE_CASE ) for i, t in enumerate(scheduler.timesteps ): SCREAMING_SNAKE_CASE : Any = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : Dict = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).prev_sample return sample def _lowerCAmelCase ( self : str ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) SCREAMING_SNAKE_CASE : str = 50 SCREAMING_SNAKE_CASE : Dict = self.dummy_model() SCREAMING_SNAKE_CASE : List[Any] = self.dummy_sample_deter scheduler.set_timesteps(_SCREAMING_SNAKE_CASE ) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:] ): SCREAMING_SNAKE_CASE : str = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : Optional[int] = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).prev_sample SCREAMING_SNAKE_CASE : Any = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.2_5_7_4 ) < 1E-3 def _lowerCAmelCase ( self : Any ) -> str: """simple docstring""" for timesteps in [25, 50, 100, 999, 1_000]: self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE ) def _lowerCAmelCase ( self : Optional[int] ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) SCREAMING_SNAKE_CASE : Optional[Any] = self.full_loop(scheduler=_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : int = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1E-3 SCREAMING_SNAKE_CASE : List[Any] = 
DEISMultistepScheduler.from_config(scheduler.config ) SCREAMING_SNAKE_CASE : Any = DPMSolverMultistepScheduler.from_config(scheduler.config ) SCREAMING_SNAKE_CASE : List[Any] = UniPCMultistepScheduler.from_config(scheduler.config ) SCREAMING_SNAKE_CASE : int = DPMSolverSinglestepScheduler.from_config(scheduler.config ) SCREAMING_SNAKE_CASE : List[Any] = self.full_loop(scheduler=_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : List[Any] = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1E-3 def _lowerCAmelCase ( self : Union[str, Any] ) -> List[str]: """simple docstring""" self.check_over_configs(thresholding=_SCREAMING_SNAKE_CASE ) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=_SCREAMING_SNAKE_CASE , prediction_type=_SCREAMING_SNAKE_CASE , sample_max_value=_SCREAMING_SNAKE_CASE , algorithm_type='dpmsolver++' , solver_order=_SCREAMING_SNAKE_CASE , solver_type=_SCREAMING_SNAKE_CASE , ) def _lowerCAmelCase ( self : str ) -> int: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_SCREAMING_SNAKE_CASE ) def _lowerCAmelCase ( self : int ) -> str: """simple docstring""" for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=_SCREAMING_SNAKE_CASE , solver_type=_SCREAMING_SNAKE_CASE , prediction_type=_SCREAMING_SNAKE_CASE , algorithm_type=_SCREAMING_SNAKE_CASE , ) SCREAMING_SNAKE_CASE : Tuple = self.full_loop( solver_order=_SCREAMING_SNAKE_CASE , solver_type=_SCREAMING_SNAKE_CASE , prediction_type=_SCREAMING_SNAKE_CASE , algorithm_type=_SCREAMING_SNAKE_CASE , ) assert not torch.isnan(_SCREAMING_SNAKE_CASE ).any(), "Samples have nan numbers" def _lowerCAmelCase ( self : str ) -> List[str]: """simple docstring""" self.check_over_configs(lower_order_final=_SCREAMING_SNAKE_CASE ) self.check_over_configs(lower_order_final=_SCREAMING_SNAKE_CASE ) def _lowerCAmelCase ( self : int ) -> Optional[int]: """simple docstring""" self.check_over_configs(lambda_min_clipped=-float('inf' ) ) self.check_over_configs(lambda_min_clipped=-5.1 ) def _lowerCAmelCase ( self : str ) -> Dict: """simple docstring""" self.check_over_configs(variance_type=_SCREAMING_SNAKE_CASE ) self.check_over_configs(variance_type='learned_range' ) def _lowerCAmelCase ( self : int ) -> Tuple: """simple docstring""" for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]: self.check_over_forward(num_inference_steps=_SCREAMING_SNAKE_CASE , time_step=0 ) def _lowerCAmelCase ( self : List[Any] ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = self.full_loop() SCREAMING_SNAKE_CASE : int = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1E-3 def _lowerCAmelCase ( self : List[Any] ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = self.full_loop(use_karras_sigmas=_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : List[Any] = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.2_2_4_8 ) < 1E-3 def _lowerCAmelCase ( self : Dict ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = self.full_loop(prediction_type='v_prediction' ) SCREAMING_SNAKE_CASE : Tuple = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) ) assert 
abs(result_mean.item() - 0.1_4_5_3 ) < 1E-3 def _lowerCAmelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : int = self.full_loop(prediction_type='v_prediction' , use_karras_sigmas=_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : Union[str, Any] = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.0_6_4_9 ) < 1E-3 def _lowerCAmelCase ( self : Union[str, Any] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : List[Any] = self.get_scheduler_config(thresholding=_SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 ) SCREAMING_SNAKE_CASE : Dict = scheduler_class(**_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : Union[str, Any] = 10 SCREAMING_SNAKE_CASE : List[Any] = self.dummy_model() SCREAMING_SNAKE_CASE : Dict = self.dummy_sample_deter.half() scheduler.set_timesteps(_SCREAMING_SNAKE_CASE ) for i, t in enumerate(scheduler.timesteps ): SCREAMING_SNAKE_CASE : List[str] = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : Tuple = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).prev_sample assert sample.dtype == torch.floataa
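# --- Added usage sketch (not part of the tests above) ---
# A hedged illustration of the denoising loop the tests drive via full_loop.
# `dummy_model` stands in for a real epsilon-prediction network and is an
# assumption; only the scheduler calls mirror the tested API.
import torch

from diffusers import DPMSolverSinglestepScheduler

scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000, solver_order=2)
scheduler.set_timesteps(25)


def dummy_model(sample, t):
    # stand-in denoiser: predicts zero noise at every step
    return torch.zeros_like(sample)


sample = torch.randn(1, 3, 32, 32)
for t in scheduler.timesteps:
    residual = dummy_model(sample, t)
    sample = scheduler.step(residual, t, sample).prev_sample
print(sample.shape)  # torch.Size([1, 3, 32, 32])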
import argparse

import torch

from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
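# --- Added usage sketch (not part of the script above) ---
# Hedged example of loading the converted dump back into PyTorch; the file
# paths below are placeholders, not real checkpoints.
import torch

from transformers import MobileBertConfig, MobileBertForPreTraining

config = MobileBertConfig.from_json_file("./mobilebert/config.json")  # placeholder path
model = MobileBertForPreTraining(config)
state_dict = torch.load("./mobilebert/pytorch_model.bin", map_location="cpu")  # placeholder path
model.load_state_dict(state_dict)
model.eval()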
"""simple docstring""" import os import re import shutil import sys import tempfile import unittest import black _UpperCamelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. _UpperCamelCase = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n" class __UpperCAmelCase (unittest.TestCase ): '''simple docstring''' def lowerCamelCase ( self ): '''simple docstring''' A__ : Optional[int] = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir , """schedulers/""" ) ) A__ : Any = self.diffusers_dir shutil.copy( os.path.join(snake_case_ , """src/diffusers/schedulers/scheduling_ddpm.py""" ) , os.path.join(self.diffusers_dir , """schedulers/scheduling_ddpm.py""" ) , ) def lowerCamelCase ( self ): '''simple docstring''' A__ : List[str] = """src/diffusers""" shutil.rmtree(self.diffusers_dir ) def lowerCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ): '''simple docstring''' A__ : Tuple = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code if overwrite_result is not None: A__ : Any = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result A__ : Any = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 ) A__ : List[str] = black.format_str(snake_case_ , mode=snake_case_ ) A__ : Optional[Any] = os.path.join(self.diffusers_dir , """new_code.py""" ) with open(snake_case_ , """w""" , newline="""\n""" ) as f: f.write(snake_case_ ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(snake_case_ ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=snake_case_ ) with open(snake_case_ , """r""" ) as f: self.assertTrue(f.read() , snake_case_ ) def lowerCamelCase ( self ): '''simple docstring''' A__ : List[str] = check_copies.find_code_in_diffusers("""schedulers.scheduling_ddpm.DDPMSchedulerOutput""" ) self.assertEqual(snake_case_ , snake_case_ ) def lowerCamelCase ( self ): '''simple docstring''' self.check_copy_consistency( """# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" , """DDPMSchedulerOutput""" , REFERENCE_CODE + """\n""" , ) # With no empty line at the end self.check_copy_consistency( """# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" , """DDPMSchedulerOutput""" , snake_case_ , ) # Copy consistency with rename self.check_copy_consistency( """# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" , """TestSchedulerOutput""" , re.sub("""DDPM""" , """Test""" , snake_case_ ) , ) # Copy consistency with a really long name A__ : int = 
"""TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason""" self.check_copy_consistency( F'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' , F'''{long_class_name}SchedulerOutput''' , re.sub("""Bert""" , snake_case_ , snake_case_ ) , ) # Copy consistency with overwrite self.check_copy_consistency( """# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" , """TestSchedulerOutput""" , snake_case_ , overwrite_result=re.sub("""DDPM""" , """Test""" , snake_case_ ) , )
import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def _a ( lowerCamelCase ): # vision encoder if "img_encoder.pos_embed" in name: lowerCamelCase : Tuple = name.replace("""img_encoder.pos_embed""", """vision_model.embeddings.position_embeddings""" ) if "img_encoder.patch_embed.proj" in name: lowerCamelCase : Union[str, Any] = name.replace("""img_encoder.patch_embed.proj""", """vision_model.embeddings.patch_embeddings.projection""" ) if "img_encoder.patch_embed.norm" in name: lowerCamelCase : Optional[int] = name.replace("""img_encoder.patch_embed.norm""", """vision_model.embeddings.layernorm""" ) if "img_encoder.layers" in name: lowerCamelCase : List[str] = name.replace("""img_encoder.layers""", """vision_model.encoder.stages""" ) if "blocks" in name and "res" not in name: lowerCamelCase : List[Any] = name.replace("""blocks""", """layers""" ) if "attn" in name and "pre_assign" not in name: lowerCamelCase : Optional[int] = name.replace("""attn""", """self_attn""" ) if "proj" in name and "self_attn" in name and "text" not in name: lowerCamelCase : Optional[int] = name.replace("""proj""", """out_proj""" ) if "pre_assign_attn.attn.proj" in name: lowerCamelCase : Any = name.replace("""pre_assign_attn.attn.proj""", """pre_assign_attn.attn.out_proj""" ) if "norm1" in name: lowerCamelCase : Optional[Any] = name.replace("""norm1""", """layer_norm1""" ) if "norm2" in name and "pre_assign" not in name: lowerCamelCase : Union[str, Any] = name.replace("""norm2""", """layer_norm2""" ) if "img_encoder.norm" in name: lowerCamelCase : Optional[int] = name.replace("""img_encoder.norm""", """vision_model.layernorm""" ) # text encoder if "text_encoder.token_embedding" in name: lowerCamelCase : int = name.replace("""text_encoder.token_embedding""", """text_model.embeddings.token_embedding""" ) if "text_encoder.positional_embedding" in name: lowerCamelCase : Optional[Any] = name.replace("""text_encoder.positional_embedding""", """text_model.embeddings.position_embedding.weight""" ) if "text_encoder.transformer.resblocks." in name: lowerCamelCase : Optional[Any] = name.replace("""text_encoder.transformer.resblocks.""", """text_model.encoder.layers.""" ) if "ln_1" in name: lowerCamelCase : Optional[Any] = name.replace("""ln_1""", """layer_norm1""" ) if "ln_2" in name: lowerCamelCase : str = name.replace("""ln_2""", """layer_norm2""" ) if "c_fc" in name: lowerCamelCase : Any = name.replace("""c_fc""", """fc1""" ) if "c_proj" in name: lowerCamelCase : Tuple = name.replace("""c_proj""", """fc2""" ) if "text_encoder" in name: lowerCamelCase : List[str] = name.replace("""text_encoder""", """text_model""" ) if "ln_final" in name: lowerCamelCase : Tuple = name.replace("""ln_final""", """final_layer_norm""" ) # projection layers if "img_projector.linear_hidden." in name: lowerCamelCase : Optional[int] = name.replace("""img_projector.linear_hidden.""", """visual_projection.""" ) if "img_projector.linear_out." 
in name: lowerCamelCase : Tuple = name.replace("""img_projector.linear_out.""", """visual_projection.3.""" ) if "text_projector.linear_hidden" in name: lowerCamelCase : Tuple = name.replace("""text_projector.linear_hidden""", """text_projection""" ) if "text_projector.linear_out" in name: lowerCamelCase : Tuple = name.replace("""text_projector.linear_out""", """text_projection.3""" ) return name def _a ( lowerCamelCase, lowerCamelCase ): for key in orig_state_dict.copy().keys(): lowerCamelCase : Tuple = orig_state_dict.pop(lowerCamelCase ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors lowerCamelCase : Any = key.split(""".""" ) lowerCamelCase , lowerCamelCase : Optional[Any] = int(key_split[2] ), int(key_split[4] ) lowerCamelCase : List[Any] = config.vision_config.hidden_size if "weight" in key: lowerCamelCase : int = val[:dim, :] lowerCamelCase : List[str] = val[dim : dim * 2, :] lowerCamelCase : Dict = val[-dim:, :] else: lowerCamelCase : List[Any] = val[:dim] lowerCamelCase : List[Any] = val[dim : dim * 2] lowerCamelCase : Tuple = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors lowerCamelCase : str = key.split(""".""" ) lowerCamelCase : Optional[int] = int(key_split[3] ) lowerCamelCase : List[str] = config.text_config.hidden_size if "weight" in key: lowerCamelCase : Optional[int] = val[:dim, :] lowerCamelCase : Any = val[ dim : dim * 2, : ] lowerCamelCase : Optional[Any] = val[-dim:, :] else: lowerCamelCase : Union[str, Any] = val[:dim] lowerCamelCase : Optional[int] = val[dim : dim * 2] lowerCamelCase : Union[str, Any] = val[-dim:] else: lowerCamelCase : List[Any] = rename_key(lowerCamelCase ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): lowerCamelCase : Any = val.squeeze_() else: lowerCamelCase : Union[str, Any] = val return orig_state_dict def _a ( ): lowerCamelCase : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase : List[str] = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase ).raw ) return im @torch.no_grad() def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase="groupvit-gcc-yfcc", lowerCamelCase=False ): lowerCamelCase : int = GroupViTConfig() lowerCamelCase : Dict = GroupViTModel(lowerCamelCase ).eval() lowerCamelCase : Optional[int] = torch.load(lowerCamelCase, map_location="""cpu""" )["""model"""] lowerCamelCase : Tuple = convert_state_dict(lowerCamelCase, lowerCamelCase ) lowerCamelCase , lowerCamelCase : Tuple = model.load_state_dict(lowerCamelCase, strict=lowerCamelCase ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowerCamelCase ) == 0) # verify result lowerCamelCase : int = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" ) lowerCamelCase : int = prepare_img() lowerCamelCase : int = processor(text=["""a photo of a cat""", """a photo of a dog"""], images=lowerCamelCase, padding=lowerCamelCase, return_tensors="""pt""" ) with torch.no_grad(): lowerCamelCase : int = model(**lowerCamelCase ) if model_name == "groupvit-gcc-yfcc": lowerCamelCase : Any = torch.tensor([[1_3.3_5_2_3, 
6.3_6_2_9]] ) elif model_name == "groupvit-gcc-redcaps": lowerCamelCase : Any = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] ) else: raise ValueError(F'''Model name {model_name} not supported.''' ) assert torch.allclose(outputs.logits_per_image, lowerCamelCase, atol=1e-3 ) processor.save_pretrained(lowerCamelCase ) model.save_pretrained(lowerCamelCase ) print("""Successfully saved processor and model to""", lowerCamelCase ) if push_to_hub: print("""Pushing to the hub...""" ) processor.push_to_hub(lowerCamelCase, organization="""nielsr""" ) model.push_to_hub(lowerCamelCase, organization="""nielsr""" ) if __name__ == "__main__": _lowerCamelCase =argparse.ArgumentParser() parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model.""" ) parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""") parser.add_argument( """--model_name""", default="""groupvit-gccy-fcc""", type=str, help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""", ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""", ) _lowerCamelCase =parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
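# --- Added usage sketch (not part of the script above) ---
# Hedged example of zero-shot classification with a converted checkpoint. The
# hub id "nvidia/groupvit-gcc-yfcc" is an assumption; the script above pushes
# to the "nielsr" organization under the provided model name.
import requests
import torch
from PIL import Image

from transformers import CLIPProcessor, GroupViTModel

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")  # assumed repo id

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(
    text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt"
)
with torch.no_grad():
    probs = model(**inputs).logits_per_image.softmax(dim=1)
print(probs)  # per-text match probabilities for the image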
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value for the player to move in a full binary game tree."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
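# --- Added worked example (assumes the helpers defined above) ---
# Eight leaves give a tree of height log2(8) = 3; with the maximiser moving
# first, the optimal value for this score vector works out to 12.
if __name__ == "__main__":
    example_scores = [3, 5, 2, 9, 12, 5, 23, 23]
    example_height = math.log(len(example_scores), 2)  # 3.0
    print(minimax(0, 0, True, example_scores, example_height))  # 12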
from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax.numpy as jnp from jax import random from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils_flax import FlaxSchedulerMixin @flax.struct.dataclass class A__ : # setable values _UpperCAmelCase : Optional[int] = None _UpperCAmelCase : Optional[jnp.ndarray] = None _UpperCAmelCase : Optional[jnp.ndarray] = None # sigma(t_i) @classmethod def UpperCamelCase__ ( cls ): return cls() @dataclass class A__ ( __SCREAMING_SNAKE_CASE): _UpperCAmelCase : jnp.ndarray _UpperCAmelCase : jnp.ndarray _UpperCAmelCase : KarrasVeSchedulerState class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): @property def UpperCamelCase__ ( self ): return True @register_to_config def __init__( self , __magic_name__ = 0.02 , __magic_name__ = 1_0_0 , __magic_name__ = 1.007 , __magic_name__ = 8_0 , __magic_name__ = 0.05 , __magic_name__ = 5_0 , ): pass def UpperCamelCase__ ( self ): return KarrasVeSchedulerState.create() def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ = () ): lowerCamelCase : Dict = jnp.arange(0 , __magic_name__ )[::-1].copy() lowerCamelCase : int = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in timesteps ] return state.replace( num_inference_steps=__magic_name__ , schedule=jnp.array(__magic_name__ , dtype=jnp.floataa ) , timesteps=__magic_name__ , ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ): if self.config.s_min <= sigma <= self.config.s_max: lowerCamelCase : Dict = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 ) else: lowerCamelCase : Dict = 0 # sample eps ~ N(0, S_noise^2 * I) lowerCamelCase : List[Any] = random.split(__magic_name__ , num=1 ) lowerCamelCase : Union[str, Any] = self.config.s_noise * random.normal(key=__magic_name__ , shape=sample.shape ) lowerCamelCase : List[Any] = sigma + gamma * sigma lowerCamelCase : str = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = True , ): lowerCamelCase : Optional[Any] = sample_hat + sigma_hat * model_output lowerCamelCase : Dict = (sample_hat - pred_original_sample) / sigma_hat lowerCamelCase : List[Any] = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=__magic_name__ , derivative=__magic_name__ , state=__magic_name__ ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = True , ): lowerCamelCase : str = sample_prev + sigma_prev * model_output lowerCamelCase : str = (sample_prev - pred_original_sample) / sigma_prev lowerCamelCase : Optional[Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=__magic_name__ , derivative=__magic_name__ , state=__magic_name__ ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): raise NotImplementedError()
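# --- Added usage sketch (not part of the scheduler above) ---
# Hedged illustration of the state-passing style this Flax scheduler uses,
# mirroring the create_state / set_timesteps signatures defined above. It
# assumes a diffusers version that still exports FlaxKarrasVeScheduler.
from diffusers import FlaxKarrasVeScheduler

scheduler = FlaxKarrasVeScheduler()
state = scheduler.create_state()
state = scheduler.set_timesteps(state, 50)
print(state.timesteps[:3])  # descending timestep indices
print(state.schedule[:3])   # the corresponding sigma(t_i) noise schedule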
"""
Word-value problem: count the words in ``words.txt`` whose alphabetical value
(A=1, ..., Z=26) is a triangular number.
"""
import os

# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution() -> int:
    script_directory = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_directory, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


if __name__ == "__main__":
    print(solution())
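# --- Added worked example (assumes TRIANGULAR_NUMBERS defined above) ---
# "SKY" has value 19 + 11 + 25 = 55 = t(10), so it counts as a triangle word.
if __name__ == "__main__":
    word = "SKY"
    value = sum(ord(ch) - 64 for ch in word)  # A=1, B=2, ..., Z=26
    print(value, value in TRIANGULAR_NUMBERS)  # 55 True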
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape (k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
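# --- Added self-check (assumes gaussian_filter and the numpy imports above) ---
# Filtering a single bright pixel exposes the kernel's footprint directly and
# avoids the lena.jpg dependency; the valid convolution shrinks each side by
# k_size - 1.
if __name__ == "__main__":
    impulse = zeros((16, 16), dtype=uint8)
    impulse[8, 8] = 255
    blurred = gaussian_filter(impulse, 3, sigma=1)
    print(blurred.shape)  # (14, 14)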
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase_ : Any = logging.get_logger(__name__) UpperCAmelCase_ : Optional[int] = torch.device('''cpu''') def _lowerCAmelCase() -> List[Any]: _SCREAMING_SNAKE_CASE ="""http://images.cocodataset.org/val2017/000000039769.jpg""" _SCREAMING_SNAKE_CASE =Image.open(requests.get(a , stream=a ).raw ) return im def _lowerCAmelCase(a : Union[str, Any] ) -> List[Any]: if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.17_03E00, 2.11_07E00, -2.08_11E00, 8.86_85E-01, 2.43_60E-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.96_36E-01, 2.34_78E-01, -1.69_63E00, -1.73_81E00, -8.63_37E-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.27_68E-01, -4.74_29E-01, -1.08_97E00, -1.02_48E00, 3.55_23E-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.53_30E-01, 2.42_11E-01, -6.01_85E-01, -8.27_89E-01, -6.04_46E-02] ) def _lowerCAmelCase(a : str , a : Dict , a : Tuple ) -> Tuple: _SCREAMING_SNAKE_CASE =dct.pop(a ) _SCREAMING_SNAKE_CASE =val def _lowerCAmelCase(a : Tuple ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE =[] for k in state_dict.keys(): _SCREAMING_SNAKE_CASE =k if ".pwconv" in k: _SCREAMING_SNAKE_CASE =k_new.replace('''.pwconv''' , '''.point_wise_conv''' ) if ".dwconv" in k: _SCREAMING_SNAKE_CASE =k_new.replace('''.dwconv''' , '''.depth_wise_conv''' ) if ".Proj." in k: _SCREAMING_SNAKE_CASE =k_new.replace('''.Proj.''' , '''.proj.''' ) if "patch_embed" in k_new: _SCREAMING_SNAKE_CASE =k_new.replace('''patch_embed''' , '''swiftformer.patch_embed.patch_embedding''' ) if "network" in k_new: _SCREAMING_SNAKE_CASE =k_new.split('''.''' ) if ls[2].isdigit(): _SCREAMING_SNAKE_CASE ="""swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] ) else: _SCREAMING_SNAKE_CASE =k_new.replace('''network''' , '''swiftformer.encoder.network''' ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def _lowerCAmelCase(a : Optional[int] , a : Optional[int] , a : Optional[Any] ) -> List[str]: _SCREAMING_SNAKE_CASE =SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size _SCREAMING_SNAKE_CASE =1000 _SCREAMING_SNAKE_CASE ="""huggingface/label-files""" _SCREAMING_SNAKE_CASE ="""imagenet-1k-id2label.json""" _SCREAMING_SNAKE_CASE =json.load(open(hf_hub_download(a , a , repo_type='''dataset''' ) , '''r''' ) ) _SCREAMING_SNAKE_CASE ={int(a ): v for k, v in idalabel.items()} _SCREAMING_SNAKE_CASE =idalabel _SCREAMING_SNAKE_CASE ={v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": _SCREAMING_SNAKE_CASE =[3, 3, 6, 4] _SCREAMING_SNAKE_CASE =[48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": _SCREAMING_SNAKE_CASE =[3, 3, 9, 6] _SCREAMING_SNAKE_CASE =[48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": _SCREAMING_SNAKE_CASE =[4, 3, 10, 5] _SCREAMING_SNAKE_CASE =[48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": _SCREAMING_SNAKE_CASE =[4, 4, 12, 6] _SCREAMING_SNAKE_CASE =[64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith('''https''' ): 
_SCREAMING_SNAKE_CASE =torch.hub.load_state_dict_from_url(a , map_location='''cpu''' , check_hash=a ) else: _SCREAMING_SNAKE_CASE =torch.load(a , map_location='''cpu''' ) _SCREAMING_SNAKE_CASE =checkpoint _SCREAMING_SNAKE_CASE =create_rename_keys(a ) for rename_key_src, rename_key_dest in rename_keys: rename_key(a , a , a ) # load HuggingFace model _SCREAMING_SNAKE_CASE =SwiftFormerForImageClassification(a ).eval() hf_model.load_state_dict(a ) # prepare test inputs _SCREAMING_SNAKE_CASE =prepare_img() _SCREAMING_SNAKE_CASE =ViTImageProcessor.from_pretrained('''preprocessor_config''' ) _SCREAMING_SNAKE_CASE =processor(images=a , return_tensors='''pt''' ) # compare outputs from both models _SCREAMING_SNAKE_CASE =get_expected_output(a ) _SCREAMING_SNAKE_CASE =hf_model(inputs['''pixel_values'''] ).logits assert hf_logits.shape == torch.Size([1, 1000] ) assert torch.allclose(hf_logits[0, 0:5] , a , atol=1E-3 ) Path(a ).mkdir(exist_ok=a ) print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" ) hf_model.save_pretrained(a ) if __name__ == "__main__": UpperCAmelCase_ : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--swiftformer_name''', default='''swiftformer_xs''', choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''], type=str, help='''Name of the SwiftFormer model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''./converted_outputs/''', type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''') UpperCAmelCase_ : List[Any] = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
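# --- Added usage sketch (not part of the script above) ---
# Hedged example of loading the converted dump; "./converted_outputs/" matches
# the script's default output directory, the image path is a placeholder, and
# a default-initialised ViTImageProcessor is an assumption about preprocessing.
import torch
from PIL import Image

from transformers import SwiftFormerForImageClassification, ViTImageProcessor

model = SwiftFormerForImageClassification.from_pretrained("./converted_outputs/").eval()
processor = ViTImageProcessor()  # assumed default preprocessing
inputs = processor(images=Image.open("cat.png"), return_tensors="pt")  # placeholder image
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[int(logits.argmax(-1))])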
import pytest

DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = """
import json
import os

import datasets

REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}


class __DummyDataset1__(datasets.GeneratorBasedBuilder):
    def _info(self):
        features = datasets.Features(
            {
                "tokens": datasets.Sequence(datasets.Value("string")),
                "ner_tags": datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=[
                            "O",
                            "B-PER",
                            "I-PER",
                            "B-ORG",
                            "I-ORG",
                            "B-LOC",
                            "I-LOC",
                        ]
                    )
                ),
                "langs": datasets.Sequence(datasets.Value("string")),
                "spans": datasets.Sequence(datasets.Value("string")),
            }
        )
        return datasets.DatasetInfo(features=features)

    def _split_generators(self, dl_manager):
        dl_path = dl_manager.download(URLS)
        return [
            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
        ]

    def _generate_examples(self, filepath):
        with open(filepath, "r", encoding="utf-8") as f:
            for i, line in enumerate(f):
                yield i, json.loads(line)
"""


@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
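# --- Added usage sketch (assumes the fixtures above) ---
# How a test module could consume dataset_loading_script_dir; the test body is
# illustrative, and the load_dataset call needs network access to fetch the
# REPO_URL files referenced by the dummy script.
from datasets import load_dataset


def test_dummy_dataset_loads(dataset_loading_script_dir):
    ds = load_dataset(dataset_loading_script_dir, split="train")
    assert "tokens" in ds.column_names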
"""simple docstring""" import argparse import collections import json import os import re import string import sys import numpy as np _SCREAMING_SNAKE_CASE : Dict = re.compile(R'''\b(a|an|the)\b''', re.UNICODE) _SCREAMING_SNAKE_CASE : Union[str, Any] = None def lowerCamelCase__ ( ) -> List[str]: lowerCamelCase_ = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.' ) parser.add_argument('data_file' , metavar='data.json' , help='Input data JSON file.' ) parser.add_argument('pred_file' , metavar='pred.json' , help='Model predictions.' ) parser.add_argument( '--out-file' , '-o' , metavar='eval.json' , help='Write accuracy metrics to file (default is stdout).' ) parser.add_argument( '--na-prob-file' , '-n' , metavar='na_prob.json' , help='Model estimates of probability of no answer.' ) parser.add_argument( '--na-prob-thresh' , '-t' , type=_lowerCamelCase , default=1.0 , help='Predict \"\" if no-answer probability exceeds this (default = 1.0).' , ) parser.add_argument( '--out-image-dir' , '-p' , metavar='out_images' , default=_lowerCamelCase , help='Save precision-recall curves to directory.' ) parser.add_argument('--verbose' , '-v' , action='store_true' ) if len(sys.argv ) == 1: parser.print_help() sys.exit(1 ) return parser.parse_args() def lowerCamelCase__ ( _lowerCamelCase : List[str] ) -> Tuple: lowerCamelCase_ = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: lowerCamelCase_ = bool(qa['answers']['text'] ) return qid_to_has_ans def lowerCamelCase__ ( _lowerCamelCase : Union[str, Any] ) -> Tuple: def remove_articles(_lowerCamelCase : Any ): return ARTICLES_REGEX.sub(' ' , _lowerCamelCase ) def white_space_fix(_lowerCamelCase : List[Any] ): return " ".join(text.split() ) def remove_punc(_lowerCamelCase : Any ): lowerCamelCase_ = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_lowerCamelCase : Tuple ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_lowerCamelCase ) ) ) ) def lowerCamelCase__ ( _lowerCamelCase : List[str] ) -> Any: if not s: return [] return normalize_answer(_lowerCamelCase ).split() def lowerCamelCase__ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str ) -> Optional[int]: return int(normalize_answer(_lowerCamelCase ) == normalize_answer(_lowerCamelCase ) ) def lowerCamelCase__ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[int] ) -> int: lowerCamelCase_ = get_tokens(_lowerCamelCase ) lowerCamelCase_ = get_tokens(_lowerCamelCase ) lowerCamelCase_ = collections.Counter(_lowerCamelCase ) & collections.Counter(_lowerCamelCase ) lowerCamelCase_ = sum(common.values() ) if len(_lowerCamelCase ) == 0 or len(_lowerCamelCase ) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks ) if num_same == 0: return 0 lowerCamelCase_ = 1.0 * num_same / len(_lowerCamelCase ) lowerCamelCase_ = 1.0 * num_same / len(_lowerCamelCase ) lowerCamelCase_ = (2 * precision * recall) / (precision + recall) return fa def lowerCamelCase__ ( _lowerCamelCase : int , _lowerCamelCase : Optional[Any] ) -> str: lowerCamelCase_ = {} lowerCamelCase_ = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: lowerCamelCase_ = qa["""id"""] lowerCamelCase_ = [t for t in qa["""answers"""]["""text"""] if normalize_answer(_lowerCamelCase )] if not gold_answers: # For unanswerable questions, only correct answer is empty string lowerCamelCase_ = [""""""] if qid not in preds: print(F'''Missing prediction 
for {qid}''' ) continue lowerCamelCase_ = preds[qid] # Take max over all gold answers lowerCamelCase_ = max(compute_exact(_lowerCamelCase , _lowerCamelCase ) for a in gold_answers ) lowerCamelCase_ = max(compute_fa(_lowerCamelCase , _lowerCamelCase ) for a in gold_answers ) return exact_scores, fa_scores def lowerCamelCase__ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Dict , _lowerCamelCase : Any , _lowerCamelCase : List[Any] ) -> Tuple: lowerCamelCase_ = {} for qid, s in scores.items(): lowerCamelCase_ = na_probs[qid] > na_prob_thresh if pred_na: lowerCamelCase_ = float(not qid_to_has_ans[qid] ) else: lowerCamelCase_ = s return new_scores def lowerCamelCase__ ( _lowerCamelCase : List[str] , _lowerCamelCase : Any , _lowerCamelCase : List[str]=None ) -> int: if not qid_list: lowerCamelCase_ = len(_lowerCamelCase ) return collections.OrderedDict( [ ('exact', 100.0 * sum(exact_scores.values() ) / total), ('f1', 100.0 * sum(fa_scores.values() ) / total), ('total', total), ] ) else: lowerCamelCase_ = len(_lowerCamelCase ) return collections.OrderedDict( [ ('exact', 100.0 * sum(exact_scores[k] for k in qid_list ) / total), ('f1', 100.0 * sum(fa_scores[k] for k in qid_list ) / total), ('total', total), ] ) def lowerCamelCase__ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Dict , _lowerCamelCase : Any ) -> Optional[Any]: for k in new_eval: lowerCamelCase_ = new_eval[k] def lowerCamelCase__ ( _lowerCamelCase : Tuple , _lowerCamelCase : List[str] , _lowerCamelCase : Tuple , _lowerCamelCase : List[str] ) -> Dict: plt.step(_lowerCamelCase , _lowerCamelCase , color='b' , alpha=0.2 , where='post' ) plt.fill_between(_lowerCamelCase , _lowerCamelCase , step='post' , alpha=0.2 , color='b' ) plt.xlabel('Recall' ) plt.ylabel('Precision' ) plt.xlim([0.0, 1.05] ) plt.ylim([0.0, 1.05] ) plt.title(_lowerCamelCase ) plt.savefig(_lowerCamelCase ) plt.clf() def lowerCamelCase__ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : Any , _lowerCamelCase : int , _lowerCamelCase : Dict=None , _lowerCamelCase : List[str]=None ) -> List[str]: lowerCamelCase_ = sorted(_lowerCamelCase , key=lambda _lowerCamelCase : na_probs[k] ) lowerCamelCase_ = 0.0 lowerCamelCase_ = 1.0 lowerCamelCase_ = 0.0 lowerCamelCase_ = [1.0] lowerCamelCase_ = [0.0] lowerCamelCase_ = 0.0 for i, qid in enumerate(_lowerCamelCase ): if qid_to_has_ans[qid]: true_pos += scores[qid] lowerCamelCase_ = true_pos / float(i + 1 ) lowerCamelCase_ = true_pos / float(_lowerCamelCase ) if i == len(_lowerCamelCase ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(_lowerCamelCase ) recalls.append(_lowerCamelCase ) if out_image: plot_pr_curve(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) return {"ap": 100.0 * avg_prec} def lowerCamelCase__ ( _lowerCamelCase : Dict , _lowerCamelCase : List[Any] , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : List[Any] , _lowerCamelCase : Any ) -> Optional[Any]: if out_image_dir and not os.path.exists(_lowerCamelCase ): os.makedirs(_lowerCamelCase ) lowerCamelCase_ = sum(1 for v in qid_to_has_ans.values() if v ) if num_true_pos == 0: return lowerCamelCase_ = make_precision_recall_eval( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , out_image=os.path.join(_lowerCamelCase , 'pr_exact.png' ) , title='Precision-Recall curve for Exact Match score' , ) lowerCamelCase_ = make_precision_recall_eval( _lowerCamelCase , 
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , out_image=os.path.join(_lowerCamelCase , 'pr_f1.png' ) , title='Precision-Recall curve for F1 score' , ) lowerCamelCase_ = {k: float(_lowerCamelCase ) for k, v in qid_to_has_ans.items()} lowerCamelCase_ = make_precision_recall_eval( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , out_image=os.path.join(_lowerCamelCase , 'pr_oracle.png' ) , title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)' , ) merge_eval(_lowerCamelCase , _lowerCamelCase , 'pr_exact' ) merge_eval(_lowerCamelCase , _lowerCamelCase , 'pr_f1' ) merge_eval(_lowerCamelCase , _lowerCamelCase , 'pr_oracle' ) def lowerCamelCase__ ( _lowerCamelCase : Dict , _lowerCamelCase : str , _lowerCamelCase : Dict , _lowerCamelCase : Any ) -> Dict: if not qid_list: return lowerCamelCase_ = [na_probs[k] for k in qid_list] lowerCamelCase_ = np.ones_like(_lowerCamelCase ) / float(len(_lowerCamelCase ) ) plt.hist(_lowerCamelCase , weights=_lowerCamelCase , bins=20 , range=(0.0, 1.0) ) plt.xlabel('Model probability of no-answer' ) plt.ylabel('Proportion of dataset' ) plt.title(F'''Histogram of no-answer probability: {name}''' ) plt.savefig(os.path.join(_lowerCamelCase , F'''na_prob_hist_{name}.png''' ) ) plt.clf() def lowerCamelCase__ ( _lowerCamelCase : List[str] , _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any] ) -> List[str]: lowerCamelCase_ = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] ) lowerCamelCase_ = num_no_ans lowerCamelCase_ = cur_score lowerCamelCase_ = 0.0 lowerCamelCase_ = sorted(_lowerCamelCase , key=lambda _lowerCamelCase : na_probs[k] ) for i, qid in enumerate(_lowerCamelCase ): if qid not in scores: continue if qid_to_has_ans[qid]: lowerCamelCase_ = scores[qid] else: if preds[qid]: lowerCamelCase_ = -1 else: lowerCamelCase_ = 0 cur_score += diff if cur_score > best_score: lowerCamelCase_ = cur_score lowerCamelCase_ = na_probs[qid] return 100.0 * best_score / len(_lowerCamelCase ), best_thresh def lowerCamelCase__ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple ) -> List[str]: lowerCamelCase_ = find_best_thresh(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) lowerCamelCase_ = find_best_thresh(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) lowerCamelCase_ = best_exact lowerCamelCase_ = exact_thresh lowerCamelCase_ = best_fa lowerCamelCase_ = fa_thresh def lowerCamelCase__ ( ) -> Dict: with open(OPTS.data_file ) as f: lowerCamelCase_ = json.load(_lowerCamelCase ) lowerCamelCase_ = dataset_json["""data"""] with open(OPTS.pred_file ) as f: lowerCamelCase_ = json.load(_lowerCamelCase ) if OPTS.na_prob_file: with open(OPTS.na_prob_file ) as f: lowerCamelCase_ = json.load(_lowerCamelCase ) else: lowerCamelCase_ = {k: 0.0 for k in preds} lowerCamelCase_ = make_qid_to_has_ans(_lowerCamelCase ) # maps qid to True/False lowerCamelCase_ = [k for k, v in qid_to_has_ans.items() if v] lowerCamelCase_ = [k for k, v in qid_to_has_ans.items() if not v] lowerCamelCase_ = get_raw_scores(_lowerCamelCase , _lowerCamelCase ) lowerCamelCase_ = apply_no_ans_threshold(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , OPTS.na_prob_thresh ) lowerCamelCase_ = apply_no_ans_threshold(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , OPTS.na_prob_thresh ) lowerCamelCase_ = make_eval_dict(_lowerCamelCase 
, _lowerCamelCase ) if has_ans_qids: lowerCamelCase_ = make_eval_dict(_lowerCamelCase , _lowerCamelCase , qid_list=_lowerCamelCase ) merge_eval(_lowerCamelCase , _lowerCamelCase , 'HasAns' ) if no_ans_qids: lowerCamelCase_ = make_eval_dict(_lowerCamelCase , _lowerCamelCase , qid_list=_lowerCamelCase ) merge_eval(_lowerCamelCase , _lowerCamelCase , 'NoAns' ) if OPTS.na_prob_file: find_all_best_thresh(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) if OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , OPTS.out_image_dir ) histogram_na_prob(_lowerCamelCase , _lowerCamelCase , OPTS.out_image_dir , 'hasAns' ) histogram_na_prob(_lowerCamelCase , _lowerCamelCase , OPTS.out_image_dir , 'noAns' ) if OPTS.out_file: with open(OPTS.out_file , 'w' ) as f: json.dump(_lowerCamelCase , _lowerCamelCase ) else: print(json.dumps(_lowerCamelCase , indent=2 ) ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE : Dict = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use('''Agg''') import matplotlib.pyplot as plt main()
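# --- Added worked example (standalone re-derivation, not the script's own code) ---
# Illustrates the token-level F1 the evaluator computes: gold "in the park" vs.
# prediction "the park" shares two tokens, giving precision 1.0, recall 2/3,
# and F1 = 0.8.
import collections as _collections


def _token_f1(gold: str, pred: str) -> float:
    gold_toks, pred_toks = gold.split(), pred.split()
    common = _collections.Counter(gold_toks) & _collections.Counter(pred_toks)
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_toks)
    recall = num_same / len(gold_toks)
    return 2 * precision * recall / (precision + recall)


# _token_f1("in the park", "the park") == 0.8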
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image


if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    """Convert a torch image tensor in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch in [0, 1] to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
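# --- Added self-check (assumes numpy_to_pil defined above) ---
# Converts a random channels-last batch in [0, 1] into PIL images.
import numpy as np

batch = np.random.rand(2, 8, 8, 3)
pil_images = numpy_to_pil(batch)
print(len(pil_images), pil_images[0].size)  # 2 (8, 8)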
"""simple docstring""" import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class __snake_case (unittest.TestCase ): __a = JukeboxTokenizer __a = { """artist""": """Zac Brown Band""", """genres""": """Country""", """lyrics""": """I met a traveller from an antique land, Who said \"Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away """, } @require_torch def __a ( self: Union[str, Any] ): import torch __lowerCamelCase = JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" ) __lowerCamelCase = tokenizer(**self.metas )["""input_ids"""] # fmt: off __lowerCamelCase = [ torch.tensor([[ 0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 
76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 10_69, 11]] ), torch.tensor([[0, 0, 0, 10_69, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def __a ( self: List[Any] ): import torch __lowerCamelCase = JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" ) __lowerCamelCase = tokenizer(**self.metas )["""input_ids"""] # fmt: off __lowerCamelCase = [ torch.tensor([[ 0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 
14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
code_codestyle: 281
from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class A__ ( nn.Module): def __init__( self , __magic_name__ = 1_6 , __magic_name__ = 8_8 , __magic_name__ = None , __magic_name__ = 1 , __magic_name__ = 0.0 , __magic_name__ = 3_2 , __magic_name__ = None , __magic_name__ = False , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "geglu" , __magic_name__ = None , ): super().__init__() lowerCamelCase : Any = nn.ModuleList( [ TransformeraDModel( num_attention_heads=__magic_name__ , attention_head_dim=__magic_name__ , in_channels=__magic_name__ , num_layers=__magic_name__ , dropout=__magic_name__ , norm_num_groups=__magic_name__ , cross_attention_dim=__magic_name__ , attention_bias=__magic_name__ , sample_size=__magic_name__ , num_vector_embeds=__magic_name__ , activation_fn=__magic_name__ , num_embeds_ada_norm=__magic_name__ , ) for _ in range(2 ) ] ) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference lowerCamelCase : Any = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` lowerCamelCase : List[Any] = [7_7, 2_5_7] # Which transformer to use to encode which condition. # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` lowerCamelCase : Optional[int] = [1, 0] def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__ = True , ): lowerCamelCase : List[Any] = hidden_states lowerCamelCase : Dict = [] lowerCamelCase : List[Any] = 0 # attention_mask is not used yet for i in range(2 ): # for each of the two transformers, pass the corresponding condition tokens lowerCamelCase : Dict = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] lowerCamelCase : Optional[int] = self.transformer_index_for_condition[i] lowerCamelCase : List[Any] = self.transformers[transformer_index]( __magic_name__ , encoder_hidden_states=__magic_name__ , timestep=__magic_name__ , cross_attention_kwargs=__magic_name__ , return_dict=__magic_name__ , )[0] encoded_states.append(encoded_state - input_states ) tokens_start += self.condition_lengths[i] lowerCamelCase : Any = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) lowerCamelCase : Dict = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=__magic_name__ )
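The style_context above is a dual-transformer block: the condition tokens are split at the stored lengths (77 and 257), each slice runs through its own transformer, and the two residuals are blended with a mix ratio before the input is added back. A minimal sketch of just that blending step in plain PyTorch; the function and variable names below are mine, not the source's:

import torch

def blend_dual_outputs(input_states, encoded_a, encoded_b, mix_ratio=0.5):
    # Each branch contributes its residual (branch output minus input);
    # the residuals are mixed, then the original input is added back.
    residual_a = encoded_a - input_states
    residual_b = encoded_b - input_states
    mixed = residual_a * mix_ratio + residual_b * (1 - mix_ratio)
    return mixed + input_states

x = torch.randn(1, 77 + 257, 64)  # (batch, condition tokens, features)
print(blend_dual_outputs(x, x + 1.0, x - 1.0).shape)  # torch.Size([1, 334, 64])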
style_context_codestyle: 681
label: 0
"""simple docstring""" def __lowercase ( _a , _a ): snake_case_ : Union[str, Any] = 0 snake_case_ : Any = len(_a ) - 1 while left <= right: # avoid divided by 0 during interpolation if sorted_collection[left] == sorted_collection[right]: if sorted_collection[left] == item: return left else: return None snake_case_ : List[Any] = left + ((item - sorted_collection[left]) * (right - left)) // ( sorted_collection[right] - sorted_collection[left] ) # out of range check if point < 0 or point >= len(_a ): return None snake_case_ : Optional[Any] = sorted_collection[point] if current_item == item: return point else: if point < left: snake_case_ : Optional[Any] = left snake_case_ : List[Any] = point elif point > right: snake_case_ : Tuple = right snake_case_ : List[Any] = point else: if item < current_item: snake_case_ : Dict = point - 1 else: snake_case_ : Union[str, Any] = point + 1 return None def __lowercase ( _a , _a , _a , _a ): # avoid divided by 0 during interpolation if sorted_collection[left] == sorted_collection[right]: if sorted_collection[left] == item: return left else: return None snake_case_ : Dict = left + ((item - sorted_collection[left]) * (right - left)) // ( sorted_collection[right] - sorted_collection[left] ) # out of range check if point < 0 or point >= len(_a ): return None if sorted_collection[point] == item: return point elif point < left: return interpolation_search_by_recursion(_a , _a , _a , _a ) elif point > right: return interpolation_search_by_recursion(_a , _a , _a , _a ) else: if sorted_collection[point] > item: return interpolation_search_by_recursion( _a , _a , _a , point - 1 ) else: return interpolation_search_by_recursion( _a , _a , point + 1 , _a ) def __lowercase ( _a ): if collection != sorted(_a ): raise ValueError('''Collection must be ascending sorted''' ) return True if __name__ == "__main__": import sys lowercase__ : Union[str, Any] = 0 if debug == 1: lowercase__ : Optional[Any] = [10, 30, 40, 45, 50, 66, 77, 93] try: __assert_sorted(collection) except ValueError: sys.exit('''Sequence must be ascending sorted to apply interpolation search''') lowercase__ : int = 67 lowercase__ : Optional[Any] = interpolation_search(collection, target) if result is not None: print(f'{target} found at positions: {result}') else: print('''Not found''')
code_codestyle: 123
import unittest from transformers import BertGenerationTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin _lowerCamelCase ="""▁""" _lowerCamelCase =get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase): _UpperCAmelCase : str = BertGenerationTokenizer _UpperCAmelCase : Tuple = False _UpperCAmelCase : List[Any] = True def UpperCamelCase__ ( self ): super().setUp() lowerCamelCase : int = BertGenerationTokenizer(__magic_name__ , keep_accents=__magic_name__ ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = """<s>""" lowerCamelCase : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<unk>""" ) self.assertEqual(vocab_keys[1] , """<s>""" ) self.assertEqual(vocab_keys[-1] , """<pad>""" ) self.assertEqual(len(__magic_name__ ) , 1_0_0_2 ) def UpperCamelCase__ ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = BertGenerationTokenizer(__magic_name__ , keep_accents=__magic_name__ ) lowerCamelCase : Optional[Any] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(__magic_name__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__magic_name__ ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , ) lowerCamelCase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __magic_name__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) lowerCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__magic_name__ ) self.assertListEqual( __magic_name__ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , ) lowerCamelCase : int = tokenizer.convert_ids_to_tokens(__magic_name__ ) self.assertListEqual( __magic_name__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) @cached_property def UpperCamelCase__ ( self ): return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" ) @slow def UpperCamelCase__ ( self ): lowerCamelCase : List[Any] = """Hello World!""" lowerCamelCase : Any = [1_8_5_3_6, 2_2_6_0, 1_0_1] self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) ) @slow def UpperCamelCase__ ( self ): lowerCamelCase : str = ( """This is a very long text with a lot of weird characters, such 
as: . , ~ ? ( ) \" [ ] ! : - . Also we will""" """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth""" ) lowerCamelCase : str = [ 8_7_1, 4_1_9, 3_5_8, 9_4_6, 9_9_1, 2_5_2_1, 4_5_2, 3_5_8, 1_3_5_7, 3_8_7, 7_7_5_1, 3_5_3_6, 1_1_2, 9_8_5, 4_5_6, 1_2_6, 8_6_5, 9_3_8, 5_4_0_0, 5_7_3_4, 4_5_8, 1_3_6_8, 4_6_7, 7_8_6, 2_4_6_2, 5_2_4_6, 1_1_5_9, 6_3_3, 8_6_5, 4_5_1_9, 4_5_7, 5_8_2, 8_5_2, 2_5_5_7, 4_2_7, 9_1_6, 5_0_8, 4_0_5, 3_4_3_2_4, 4_9_7, 3_9_1, 4_0_8, 1_1_3_4_2, 1_2_4_4, 3_8_5, 1_0_0, 9_3_8, 9_8_5, 4_5_6, 5_7_4, 3_6_2, 1_2_5_9_7, 3_2_0_0, 3_1_2_9, 1_1_7_2, ] self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) ) @require_torch @slow def UpperCamelCase__ ( self ): import torch from transformers import BertGenerationConfig, BertGenerationEncoder # Build sequence lowerCamelCase : Union[str, Any] = list(self.big_tokenizer.get_vocab().keys() )[:1_0] lowerCamelCase : Dict = """ """.join(__magic_name__ ) lowerCamelCase : Any = self.big_tokenizer.encode_plus(__magic_name__ , return_tensors="""pt""" , return_token_type_ids=__magic_name__ ) lowerCamelCase : List[str] = self.big_tokenizer.batch_encode_plus( [sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=__magic_name__ ) lowerCamelCase : Tuple = BertGenerationConfig() lowerCamelCase : Optional[int] = BertGenerationEncoder(__magic_name__ ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**__magic_name__ ) model(**__magic_name__ ) @slow def UpperCamelCase__ ( self ): # fmt: off lowerCamelCase : Any = {"""input_ids""": [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__magic_name__ , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
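The style_context above exercises BertGenerationTokenizer against a sentencepiece fixture and a released checkpoint. A quick way to reproduce its "Hello World!" expectation interactively; this downloads the checkpoint named in the test:

from transformers import BertGenerationTokenizer

tok = BertGenerationTokenizer.from_pretrained(
    "google/bert_for_seq_generation_L-24_bbc_encoder"
)
ids = tok.encode("Hello World!")
print(ids)  # the slow test above expects [18536, 2260, 101]
print(tok.convert_ids_to_tokens(ids))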
style_context_codestyle: 681
label: 0
'''simple docstring''' from sklearn.metrics import fa_score, matthews_corrcoef import datasets from .record_evaluation import evaluate as evaluate_record lowerCAmelCase : Dict = """\ @article{wang2019superglue, title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems}, author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R}, journal={arXiv preprint arXiv:1905.00537}, year={2019} } """ lowerCAmelCase : List[str] = """\ SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after GLUE with a new set of more difficult language understanding tasks, improved resources, and a new public leaderboard. """ lowerCAmelCase : Optional[Any] = """ Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset. Args: predictions: list of predictions to score. Depending on the SuperGlUE subset: - for 'record': list of question-answer dictionaries with the following keys: - 'idx': index of the question as specified by the dataset - 'prediction_text': the predicted answer text - for 'multirc': list of question-answer dictionaries with the following keys: - 'idx': index of the question-answer pair as specified by the dataset - 'prediction': the predicted answer label - otherwise: list of predicted labels references: list of reference labels. Depending on the SuperGLUE subset: - for 'record': list of question-answers dictionaries with the following keys: - 'idx': index of the question as specified by the dataset - 'answers': list of possible answers - otherwise: list of reference labels Returns: depending on the SuperGLUE subset: - for 'record': - 'exact_match': Exact match between answer and gold answer - 'f1': F1 score - for 'multirc': - 'exact_match': Exact match between answer and gold answer - 'f1_m': Per-question macro-F1 score - 'f1_a': Average F1 score over all answers - for 'axb': 'matthews_correlation': Matthew Correlation - for 'cb': - 'accuracy': Accuracy - 'f1': F1 score - for all others: - 'accuracy': Accuracy Examples: >>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"] >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0} >>> super_glue_metric = datasets.load_metric('super_glue', 'cb') >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0, 'f1': 1.0} >>> super_glue_metric = datasets.load_metric('super_glue', 'record') >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}] >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'exact_match': 1.0, 'f1': 1.0} >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc') >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0} >>> super_glue_metric = datasets.load_metric('super_glue', 'axb') >>> 
references = [0, 1] >>> predictions = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'matthews_correlation': 1.0} """ def _A ( A ,A ) -> Any: return float((preds == labels).mean() ) def _A ( A ,A ,A="binary" ) -> Tuple: lowercase : Union[str, Any] = simple_accuracy(A ,A ) lowercase : Dict = float(fa_score(y_true=A ,y_pred=A ,average=A ) ) return { "accuracy": acc, "f1": fa, } def _A ( A ,A ) -> Dict: lowercase : Any = {} for id_pred, label in zip(A ,A ): lowercase : Any = F'''{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}''' lowercase : Any = id_pred["""prediction"""] if question_id in question_map: question_map[question_id].append((pred, label) ) else: lowercase : List[Any] = [(pred, label)] lowercase : int = [], [] for question, preds_labels in question_map.items(): lowercase : str = zip(*A ) lowercase : List[str] = fa_score(y_true=A ,y_pred=A ,average="macro" ) fas.append(A ) lowercase : str = int(sum(pred == label for pred, label in preds_labels ) == len(A ) ) ems.append(A ) lowercase : Any = float(sum(A ) / len(A ) ) lowercase : List[str] = sum(A ) / len(A ) lowercase : Dict = float(fa_score(y_true=A ,y_pred=[id_pred["prediction"] for id_pred in ids_preds] ) ) return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a} @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class _UpperCamelCase ( datasets.Metric): '''simple docstring''' def a__ ( self ) -> List[str]: if self.config_name not in [ "boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg", ]: raise KeyError( "You should supply a configuration name selected in " "[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None , ) def a__ ( self ) -> Any: if self.config_name == "record": return { "predictions": { "idx": { "passage": datasets.Value("int64" ), "query": datasets.Value("int64" ), }, "prediction_text": datasets.Value("string" ), }, "references": { "idx": { "passage": datasets.Value("int64" ), "query": datasets.Value("int64" ), }, "answers": datasets.Sequence(datasets.Value("string" ) ), }, } elif self.config_name == "multirc": return { "predictions": { "idx": { "answer": datasets.Value("int64" ), "paragraph": datasets.Value("int64" ), "question": datasets.Value("int64" ), }, "prediction": datasets.Value("int64" ), }, "references": datasets.Value("int64" ), } else: return { "predictions": datasets.Value("int64" ), "references": datasets.Value("int64" ), } def a__ ( self , a_ , a_ ) -> List[str]: if self.config_name == "axb": return {"matthews_correlation": matthews_corrcoef(a_ , a_ )} elif self.config_name == "cb": return acc_and_fa(a_ , a_ , fa_avg="macro" ) elif self.config_name == "record": lowercase : Optional[Any] = [ { """qas""": [ {"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]} for ref in references ] } ] lowercase : int = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions} return evaluate_record(a_ , a_ )[0] elif self.config_name == "multirc": return evaluate_multirc(a_ , a_ ) elif self.config_name in ["copa", "rte", 
"wic", "wsc", "wsc.fixed", "boolq", "axg"]: return {"accuracy": simple_accuracy(a_ , a_ )} else: raise KeyError( "You should supply a configuration name selected in " "[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
code_codestyle: 372
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser

# Configuration
_lowerCamelCase =HfArgumentParser(InitializationArguments)
_lowerCamelCase =parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
_lowerCamelCase =AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
_lowerCamelCase ={
    """vocab_size""": len(tokenizer),
    """scale_attn_by_inverse_layer_idx""": True,
    """reorder_and_upcast_attn""": True,
}

# Load model config (GPT-2 large in this case)
_lowerCamelCase =AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
_lowerCamelCase =AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
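The initialize-from-config pattern that script uses (load a config with overridden fields, then build randomly initialized weights without downloading a checkpoint) in isolation; the checkpoint name and the override below are illustrative:

from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("gpt2-large", scale_attn_by_inverse_layer_idx=True)
model = AutoModelForCausalLM.from_config(config)  # random init, no pretrained weights
print(sum(p.numel() for p in model.parameters()))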
style_context_codestyle: 681
label: 0
from __future__ import annotations import queue class _UpperCamelCase : '''simple docstring''' def __init__( self : Union[str, Any] , a : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Any = data SCREAMING_SNAKE_CASE : Dict = None SCREAMING_SNAKE_CASE : str = None def lowerCamelCase__ ( ): print("\n********Press N to stop entering at any point of time********\n") SCREAMING_SNAKE_CASE : Optional[Any] = input("Enter the value of the root node: ").strip().lower() SCREAMING_SNAKE_CASE : queue.Queue = queue.Queue() SCREAMING_SNAKE_CASE : str = TreeNode(int(_a)) q.put(_a) while not q.empty(): SCREAMING_SNAKE_CASE : Dict = q.get() SCREAMING_SNAKE_CASE : Any = f"Enter the left node of {node_found.data}: " SCREAMING_SNAKE_CASE : int = input(_a).strip().lower() or """n""" if check == "n": return tree_node SCREAMING_SNAKE_CASE : Tuple = TreeNode(int(_a)) SCREAMING_SNAKE_CASE : List[Any] = left_node q.put(_a) SCREAMING_SNAKE_CASE : Union[str, Any] = f"Enter the right node of {node_found.data}: " SCREAMING_SNAKE_CASE : List[Any] = input(_a).strip().lower() or """n""" if check == "n": return tree_node SCREAMING_SNAKE_CASE : Dict = TreeNode(int(_a)) SCREAMING_SNAKE_CASE : Dict = right_node q.put(_a) raise def lowerCamelCase__ ( _a): if not isinstance(_a , _a) or not node: return print(node.data , end=",") pre_order(node.left) pre_order(node.right) def lowerCamelCase__ ( _a): if not isinstance(_a , _a) or not node: return in_order(node.left) print(node.data , end=",") in_order(node.right) def lowerCamelCase__ ( _a): if not isinstance(_a , _a) or not node: return post_order(node.left) post_order(node.right) print(node.data , end=",") def lowerCamelCase__ ( _a): if not isinstance(_a , _a) or not node: return SCREAMING_SNAKE_CASE : queue.Queue = queue.Queue() q.put(_a) while not q.empty(): SCREAMING_SNAKE_CASE : Any = q.get() print(node_dequeued.data , end=",") if node_dequeued.left: q.put(node_dequeued.left) if node_dequeued.right: q.put(node_dequeued.right) def lowerCamelCase__ ( _a): if not isinstance(_a , _a) or not node: return SCREAMING_SNAKE_CASE : queue.Queue = queue.Queue() q.put(_a) while not q.empty(): SCREAMING_SNAKE_CASE : Any = [] while not q.empty(): SCREAMING_SNAKE_CASE : Tuple = q.get() print(node_dequeued.data , end=",") if node_dequeued.left: list_.append(node_dequeued.left) if node_dequeued.right: list_.append(node_dequeued.right) print() for node in list_: q.put(_a) def lowerCamelCase__ ( _a): if not isinstance(_a , _a) or not node: return SCREAMING_SNAKE_CASE : list[TreeNode] = [] SCREAMING_SNAKE_CASE : Optional[Any] = node while n or stack: while n: # start from root node, find its left child print(n.data , end=",") stack.append(_a) SCREAMING_SNAKE_CASE : Optional[Any] = n.left # end of while means current node doesn't have left child SCREAMING_SNAKE_CASE : List[str] = stack.pop() # start to traverse its right child SCREAMING_SNAKE_CASE : Any = n.right def lowerCamelCase__ ( _a): if not isinstance(_a , _a) or not node: return SCREAMING_SNAKE_CASE : list[TreeNode] = [] SCREAMING_SNAKE_CASE : Optional[int] = node while n or stack: while n: stack.append(_a) SCREAMING_SNAKE_CASE : int = n.left SCREAMING_SNAKE_CASE : Dict = stack.pop() print(n.data , end=",") SCREAMING_SNAKE_CASE : List[str] = n.right def lowerCamelCase__ ( _a): if not isinstance(_a , _a) or not node: return SCREAMING_SNAKE_CASE : List[Any] = [], [] SCREAMING_SNAKE_CASE : Tuple = node stacka.append(_a) while stacka: # to find the reversed order of post order, store it in stack2 
SCREAMING_SNAKE_CASE : Any = stacka.pop() if n.left: stacka.append(n.left) if n.right: stacka.append(n.right) stacka.append(_a) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end=",") def lowerCamelCase__ ( _a = "" , _a=50 , _a="*"): if not s: return "\n" + width * char SCREAMING_SNAKE_CASE : Union[str, Any] = divmod(width - len(_a) - 2 , 2) return f"{left * char} {s} {(left + extra) * char}" if __name__ == "__main__": import doctest doctest.testmod() print(prompt('Binary Tree Traversals')) a_ = build_tree() print(prompt('Pre Order Traversal')) pre_order(node) print(prompt() + '\n') print(prompt('In Order Traversal')) in_order(node) print(prompt() + '\n') print(prompt('Post Order Traversal')) post_order(node) print(prompt() + '\n') print(prompt('Level Order Traversal')) level_order(node) print(prompt() + '\n') print(prompt('Actual Level Order Traversal')) level_order_actual(node) print('*' * 50 + '\n') print(prompt('Pre Order Traversal - Iteration Version')) pre_order_iter(node) print(prompt() + '\n') print(prompt('In Order Traversal - Iteration Version')) in_order_iter(node) print(prompt() + '\n') print(prompt('Post Order Traversal - Iteration Version')) post_order_iter(node) print(prompt())
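The traversal module above builds a tree from stdin and walks it several ways; the queue-based level-order idea at its core fits in a few lines with collections.deque (the class and function names here are mine):

from collections import deque

class Node:
    def __init__(self, data, left=None, right=None):
        self.data, self.left, self.right = data, left, right

def level_order(root):
    """Yield values breadth-first: visit a node, then queue its children."""
    queue = deque([root] if root else [])
    while queue:
        node = queue.popleft()
        yield node.data
        if node.left:
            queue.append(node.left)
        if node.right:
            queue.append(node.right)

root = Node(1, Node(2, Node(4), Node(5)), Node(3))
print(list(level_order(root)))  # [1, 2, 3, 4, 5]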
code_codestyle: 25
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class A__ ( unittest.TestCase): def UpperCamelCase__ ( self , __magic_name__ ): for model_result in results.values(): for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ): lowerCamelCase : List[str] = model_result["""result"""][batch_size][sequence_length] self.assertIsNotNone(__magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = """sshleifer/tiny-gpt2""" lowerCamelCase : str = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__magic_name__ , multi_process=__magic_name__ , ) lowerCamelCase : Dict = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : Any = """sgugger/tiny-distilbert-classification""" lowerCamelCase : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , only_pretrain_model=__magic_name__ , ) lowerCamelCase : List[Any] = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2""" lowerCamelCase : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) lowerCamelCase : Any = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : List[Any] = """sshleifer/tiny-gpt2""" lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ ) lowerCamelCase : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__magic_name__ , multi_process=__magic_name__ , ) lowerCamelCase : Optional[Any] = TensorFlowBenchmark(__magic_name__ , [config] ) lowerCamelCase : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = """sshleifer/tiny-gpt2""" lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(__magic_name__ ) lowerCamelCase : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) lowerCamelCase : Union[str, Any] = TensorFlowBenchmark(__magic_name__ , [config] ) lowerCamelCase : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) 
self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2""" lowerCamelCase : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) lowerCamelCase : int = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase__ ( self ): lowerCamelCase : int = """sshleifer/tiny-gpt2""" lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ ) lowerCamelCase : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) lowerCamelCase : Any = TensorFlowBenchmark(__magic_name__ , [config] ) lowerCamelCase : str = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase__ ( self ): lowerCamelCase : str = """patrickvonplaten/t5-tiny-random""" lowerCamelCase : Tuple = AutoConfig.from_pretrained(__magic_name__ ) lowerCamelCase : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , ) lowerCamelCase : List[Any] = TensorFlowBenchmark(__magic_name__ , configs=[config] ) lowerCamelCase : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , """Cannot do xla on CPU.""" ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[Any] = """sshleifer/tiny-gpt2""" lowerCamelCase : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__magic_name__ , multi_process=__magic_name__ , ) lowerCamelCase : int = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : str = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = """sshleifer/tiny-gpt2""" with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase : List[str] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=__magic_name__ , save_to_csv=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__magic_name__ , """inf_time.csv""" ) , inference_memory_csv_file=os.path.join(__magic_name__ , """inf_mem.csv""" ) , env_info_csv_file=os.path.join(__magic_name__ , """env.csv""" ) , multi_process=__magic_name__ , ) lowerCamelCase : List[str] = TensorFlowBenchmark(__magic_name__ ) benchmark.run() self.assertTrue(Path(os.path.join(__magic_name__ , """inf_time.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(__magic_name__ , """inf_mem.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(__magic_name__ , """env.csv""" ) ).exists() ) def UpperCamelCase__ ( self ): lowerCamelCase : str = """sshleifer/tiny-gpt2""" def _check_summary_is_not_empty(__magic_name__ ): self.assertTrue(hasattr(__magic_name__ , 
"""sequential""" ) ) self.assertTrue(hasattr(__magic_name__ , """cumulative""" ) ) self.assertTrue(hasattr(__magic_name__ , """current""" ) ) self.assertTrue(hasattr(__magic_name__ , """total""" ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__magic_name__ , """log.txt""" ) , log_print=__magic_name__ , trace_memory_line_by_line=__magic_name__ , eager_mode=__magic_name__ , multi_process=__magic_name__ , ) lowerCamelCase : Tuple = TensorFlowBenchmark(__magic_name__ ) lowerCamelCase : Union[str, Any] = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(__magic_name__ , """log.txt""" ) ).exists() )
style_context_codestyle: 681
label: 0
'''simple docstring''' import pickle import numpy as np from matplotlib import pyplot as plt class A_ : '''simple docstring''' def __init__( self , A_ , A_ , A_ , A_ , A_ , A_=0.2 , A_=0.2 ): _UpperCamelCase = bp_numa _UpperCamelCase = bp_numa _UpperCamelCase = bp_numa _UpperCamelCase = conva_get[:2] _UpperCamelCase = conva_get[2] _UpperCamelCase = size_pa _UpperCamelCase = rate_w _UpperCamelCase = rate_t _UpperCamelCase = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] _UpperCamelCase = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) _UpperCamelCase = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) _UpperCamelCase = -2 * np.random.rand(self.conva[1] ) + 1 _UpperCamelCase = -2 * np.random.rand(self.num_bpa ) + 1 _UpperCamelCase = -2 * np.random.rand(self.num_bpa ) + 1 def a ( self , A_ ): # save model dict with pickle _UpperCamelCase = { """num_bp1""": self.num_bpa, """num_bp2""": self.num_bpa, """num_bp3""": self.num_bpa, """conv1""": self.conva, """step_conv1""": self.step_conva, """size_pooling1""": self.size_poolinga, """rate_weight""": self.rate_weight, """rate_thre""": self.rate_thre, """w_conv1""": self.w_conva, """wkj""": self.wkj, """vji""": self.vji, """thre_conv1""": self.thre_conva, """thre_bp2""": self.thre_bpa, """thre_bp3""": self.thre_bpa, } with open(A_ , "wb" ) as f: pickle.dump(A_ , A_ ) print(F"Model saved: {save_path}" ) @classmethod def a ( cls , A_ ): # read saved model with open(A_ , "rb" ) as f: _UpperCamelCase = pickle.load(A_ ) # noqa: S301 _UpperCamelCase = model_dic.get("conv1" ) conv_get.append(model_dic.get("step_conv1" ) ) _UpperCamelCase = model_dic.get("size_pooling1" ) _UpperCamelCase = model_dic.get("num_bp1" ) _UpperCamelCase = model_dic.get("num_bp2" ) _UpperCamelCase = model_dic.get("num_bp3" ) _UpperCamelCase = model_dic.get("rate_weight" ) _UpperCamelCase = model_dic.get("rate_thre" ) # create model instance _UpperCamelCase = CNN(A_ , A_ , A_ , A_ , A_ , A_ , A_ ) # modify model parameter _UpperCamelCase = model_dic.get("w_conv1" ) _UpperCamelCase = model_dic.get("wkj" ) _UpperCamelCase = model_dic.get("vji" ) _UpperCamelCase = model_dic.get("thre_conv1" ) _UpperCamelCase = model_dic.get("thre_bp2" ) _UpperCamelCase = model_dic.get("thre_bp3" ) return conv_ins def a ( self , A_ ): return 1 / (1 + np.exp(-1 * x )) def a ( self , A_ ): return round(A_ , 3 ) def a ( self , A_ , A_ , A_ , A_ , A_ ): # convolution process _UpperCamelCase = convs[0] _UpperCamelCase = convs[1] _UpperCamelCase = np.shape(A_ )[0] # get the data slice of original image data, data_focus _UpperCamelCase = [] for i_focus in range(0 , size_data - size_conv + 1 , A_ ): for j_focus in range(0 , size_data - size_conv + 1 , A_ ): _UpperCamelCase = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(A_ ) # calculate the feature map of every single kernel, and saved as list of matrix _UpperCamelCase = [] _UpperCamelCase = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(A_ ): _UpperCamelCase = [] for i_focus in range(len(A_ ) ): _UpperCamelCase = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(A_ ) ) _UpperCamelCase = np.asmatrix(A_ ).reshape( A_ , A_ ) data_featuremap.append(A_ ) # expanding the data slice to One dimenssion _UpperCamelCase = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(A_ ) ) _UpperCamelCase = np.asarray(A_ ) return focus_list, data_featuremap def 
a ( self , A_ , A_ , A_="average_pool" ): # pooling process _UpperCamelCase = len(featuremaps[0] ) _UpperCamelCase = int(size_map / size_pooling ) _UpperCamelCase = [] for i_map in range(len(A_ ) ): _UpperCamelCase = featuremaps[i_map] _UpperCamelCase = [] for i_focus in range(0 , A_ , A_ ): for j_focus in range(0 , A_ , A_ ): _UpperCamelCase = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(A_ ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(A_ ) ) _UpperCamelCase = np.asmatrix(A_ ).reshape(A_ , A_ ) featuremap_pooled.append(A_ ) return featuremap_pooled def a ( self , A_ ): # expanding three dimension data to one dimension list _UpperCamelCase = [] for i in range(len(A_ ) ): _UpperCamelCase = np.shape(data[i] ) _UpperCamelCase = data[i].reshape(1 , shapes[0] * shapes[1] ) _UpperCamelCase = data_listed.getA().tolist()[0] data_expanded.extend(A_ ) _UpperCamelCase = np.asarray(A_ ) return data_expanded def a ( self , A_ ): # expanding matrix to one dimension list _UpperCamelCase = np.asarray(A_ ) _UpperCamelCase = np.shape(A_ ) _UpperCamelCase = data_mat.reshape(1 , shapes[0] * shapes[1] ) return data_expanded def a ( self , A_ , A_ , A_ , A_ , A_ ): _UpperCamelCase = [] _UpperCamelCase = 0 for i_map in range(A_ ): _UpperCamelCase = np.ones((size_map, size_map) ) for i in range(0 , A_ , A_ ): for j in range(0 , A_ , A_ ): _UpperCamelCase = pd_pool[ i_pool ] _UpperCamelCase = i_pool + 1 _UpperCamelCase = np.multiply( A_ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) ) pd_all.append(A_ ) return pd_all def a ( self , A_ , A_ , A_ , A_ , A_ , A_=bool ): # model traning print("----------------------Start Training-------------------------" ) print((" - - Shape: Train_Data ", np.shape(A_ )) ) print((" - - Shape: Teach_Data ", np.shape(A_ )) ) _UpperCamelCase = 0 _UpperCamelCase = [] _UpperCamelCase = 1_00_00 while rp < n_repeat and mse >= error_accuracy: _UpperCamelCase = 0 print(F"-------------Learning Time {rp}--------------" ) for p in range(len(A_ ) ): # print('------------Learning Image: %d--------------'%p) _UpperCamelCase = np.asmatrix(datas_train[p] ) _UpperCamelCase = np.asarray(datas_teach[p] ) _UpperCamelCase = self.convolute( A_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) _UpperCamelCase = self.pooling(A_ , self.size_poolinga ) _UpperCamelCase = np.shape(A_ ) _UpperCamelCase = self._expand(A_ ) _UpperCamelCase = data_bp_input _UpperCamelCase = np.dot(A_ , self.vji.T ) - self.thre_bpa _UpperCamelCase = self.sig(A_ ) _UpperCamelCase = np.dot(A_ , self.wkj.T ) - self.thre_bpa _UpperCamelCase = self.sig(A_ ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- _UpperCamelCase = np.multiply( (data_teach - bp_outa) , np.multiply(A_ , (1 - bp_outa) ) ) _UpperCamelCase = np.multiply( np.dot(A_ , self.wkj ) , np.multiply(A_ , (1 - bp_outa) ) ) _UpperCamelCase = np.dot(A_ , self.vji ) _UpperCamelCase = pd_i_all / (self.size_poolinga * self.size_poolinga) _UpperCamelCase = pd_conva_pooled.T.getA().tolist() _UpperCamelCase = self._calculate_gradient_from_pool( A_ , A_ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): _UpperCamelCase = self._expand_mat(pd_conva_all[k_conv] ) _UpperCamelCase = self.rate_weight * np.dot(A_ , A_ ) 
_UpperCamelCase = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) _UpperCamelCase = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer _UpperCamelCase = self.wkj + pd_k_all.T * bp_outa * self.rate_weight _UpperCamelCase = self.vji + pd_j_all.T * bp_outa * self.rate_weight _UpperCamelCase = self.thre_bpa - pd_k_all * self.rate_thre _UpperCamelCase = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image _UpperCamelCase = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) _UpperCamelCase = rp + 1 _UpperCamelCase = error_count / patterns all_mse.append(A_ ) def draw_error(): _UpperCamelCase = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(A_ , "+-" ) plt.plot(A_ , "r--" ) plt.xlabel("Learning Times" ) plt.ylabel("All_mse" ) plt.grid(A_ , alpha=0.5 ) plt.show() print("------------------Training Complished---------------------" ) print((" - - Training epoch: ", rp, F" - - Mse: {mse:.6f}") ) if draw_e: draw_error() return mse def a ( self , A_ ): # model predict _UpperCamelCase = [] print("-------------------Start Testing-------------------------" ) print((" - - Shape: Test_Data ", np.shape(A_ )) ) for p in range(len(A_ ) ): _UpperCamelCase = np.asmatrix(datas_test[p] ) _UpperCamelCase = self.convolute( A_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) _UpperCamelCase = self.pooling(A_ , self.size_poolinga ) _UpperCamelCase = self._expand(A_ ) _UpperCamelCase = data_bp_input _UpperCamelCase = bp_outa * self.vji.T - self.thre_bpa _UpperCamelCase = self.sig(A_ ) _UpperCamelCase = bp_outa * self.wkj.T - self.thre_bpa _UpperCamelCase = self.sig(A_ ) produce_out.extend(bp_outa.getA().tolist() ) _UpperCamelCase = [list(map(self.do_round , A_ ) ) for each in produce_out] return np.asarray(A_ ) def a ( self , A_ ): # return the data of image after convoluting process so we can check it out _UpperCamelCase = np.asmatrix(A_ ) _UpperCamelCase = self.convolute( A_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) _UpperCamelCase = self.pooling(A_ , self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
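The convolute step in that hand-rolled CNN is an ordinary "valid" 2-D convolution followed by a sigmoid, one kernel at a time. A small numpy sketch of a single feature map; the names and toy inputs are mine:

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def conv2d_valid(image, kernel, threshold=0.0, step=1):
    """Slide kernel over image (valid padding), subtract a threshold,
    and squash with a sigmoid, mirroring the convolute step above."""
    size = image.shape[0] - kernel.shape[0] + 1
    n = (size - 1) // step + 1
    out = np.empty((n, n))
    for oi, i in enumerate(range(0, size, step)):
        for oj, j in enumerate(range(0, size, step)):
            patch = image[i : i + kernel.shape[0], j : j + kernel.shape[1]]
            out[oi, oj] = sigmoid(np.sum(patch * kernel) - threshold)
    return out

image = np.arange(16.0).reshape(4, 4) / 16.0
kernel = np.ones((2, 2)) / 4.0
print(conv2d_valid(image, kernel).shape)  # (3, 3)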
code_codestyle: 138
import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def _a ( lowerCamelCase ): return x + 2 class A__ ( unittest.TestCase): def UpperCamelCase__ ( self ): lowerCamelCase : List[Any] = """x = 3""" lowerCamelCase : Tuple = {} lowerCamelCase : List[str] = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result == 3 self.assertDictEqual(__magic_name__ , {"""x""": 3} ) lowerCamelCase : Optional[int] = """x = y""" lowerCamelCase : Tuple = {"""y""": 5} lowerCamelCase : Tuple = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 5, """y""": 5} ) def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = """y = add_two(x)""" lowerCamelCase : List[Any] = {"""x""": 3} lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} ) # Won't work without the tool with CaptureStdout() as out: lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result is None assert "tried to execute add_two" in out.out def UpperCamelCase__ ( self ): lowerCamelCase : int = """x = 3""" lowerCamelCase : Dict = {} lowerCamelCase : Tuple = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result == 3 self.assertDictEqual(__magic_name__ , {"""x""": 3} ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[Any] = """test_dict = {'x': x, 'y': add_two(x)}""" lowerCamelCase : Optional[int] = {"""x""": 3} lowerCamelCase : Tuple = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} ) self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = """x = 3\ny = 5""" lowerCamelCase : Optional[int] = {} lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 5} ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = """text = f'This is x: {x}.'""" lowerCamelCase : Optional[int] = {"""x""": 3} lowerCamelCase : Optional[int] = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(__magic_name__ , {"""x""": 3, """text""": """This is x: 3."""} ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = """if x <= 3:\n y = 2\nelse:\n y = 5""" lowerCamelCase : Tuple = {"""x""": 3} lowerCamelCase : int = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 2} ) lowerCamelCase : Tuple = {"""x""": 8} lowerCamelCase : Dict = evaluate(__magic_name__ , {} , state=__magic_name__ ) # evaluate returns the value of the last assignment. 
assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 8, """y""": 5} ) def UpperCamelCase__ ( self ): lowerCamelCase : Dict = """test_list = [x, add_two(x)]""" lowerCamelCase : List[Any] = {"""x""": 3} lowerCamelCase : List[str] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) self.assertListEqual(__magic_name__ , [3, 5] ) self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_list""": [3, 5]} ) def UpperCamelCase__ ( self ): lowerCamelCase : str = """y = x""" lowerCamelCase : List[Any] = {"""x""": 3} lowerCamelCase : Any = evaluate(__magic_name__ , {} , state=__magic_name__ ) assert result == 3 self.assertDictEqual(__magic_name__ , {"""x""": 3, """y""": 3} ) def UpperCamelCase__ ( self ): lowerCamelCase : Optional[int] = """test_list = [x, add_two(x)]\ntest_list[1]""" lowerCamelCase : Any = {"""x""": 3} lowerCamelCase : List[str] = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_list""": [3, 5]} ) lowerCamelCase : Any = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']""" lowerCamelCase : Dict = {"""x""": 3} lowerCamelCase : Any = evaluate(__magic_name__ , {"""add_two""": add_two} , state=__magic_name__ ) assert result == 5 self.assertDictEqual(__magic_name__ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} ) def UpperCamelCase__ ( self ): lowerCamelCase : Union[str, Any] = """x = 0\nfor i in range(3):\n x = i""" lowerCamelCase : int = {} lowerCamelCase : Union[str, Any] = evaluate(__magic_name__ , {"""range""": range} , state=__magic_name__ ) assert result == 2 self.assertDictEqual(__magic_name__ , {"""x""": 2, """i""": 2} )
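What those tests exercise, evaluating a small Python subset against a whitelist of tools and a mutable state dict, can be sketched with the ast module. This is a deliberately tiny stand-in of my own, not the library's implementation:

import ast

def _eval(node, tools, state):
    if isinstance(node, ast.Constant):
        return node.value
    if isinstance(node, ast.Name):
        return state[node.id]
    if isinstance(node, ast.Call):
        func = tools[node.func.id]  # only whitelisted callables may run
        return func(*[_eval(a, tools, state) for a in node.args])
    raise ValueError(f"unsupported node: {ast.dump(node)}")

def tiny_evaluate(code, tools, state):
    """Run assignments/expressions; return the last value, as the tests expect."""
    result = None
    for stmt in ast.parse(code).body:
        if isinstance(stmt, ast.Assign):
            result = _eval(stmt.value, tools, state)
            for target in stmt.targets:
                state[target.id] = result
        elif isinstance(stmt, ast.Expr):
            result = _eval(stmt.value, tools, state)
    return result

state = {"x": 3}
print(tiny_evaluate("y = add_two(x)", {"add_two": lambda v: v + 2}, state))  # 5
print(state)  # {'x': 3, 'y': 5}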
style_context_codestyle: 681
label: 0
"""simple docstring""" import string import numpy def __snake_case ( __A : Optional[Any] , __A : Union[str, Any] ) -> List[Any]: '''simple docstring''' return b if a == 0 else greatest_common_divisor(b % a , __A ) class lowerCAmelCase__ : '''simple docstring''' _SCREAMING_SNAKE_CASE : Tuple = string.ascii_uppercase + string.digits # This cipher takes alphanumerics into account # i.e. a total of 36 characters # take x and return x % len(key_string) _SCREAMING_SNAKE_CASE : List[str] = numpy.vectorize(lambda _lowerCamelCase : x % 36 ) _SCREAMING_SNAKE_CASE : Optional[Any] = numpy.vectorize(__SCREAMING_SNAKE_CASE ) def __init__( self : Optional[int] , _SCREAMING_SNAKE_CASE : List[str] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = self.modulus(_SCREAMING_SNAKE_CASE ) # mod36 calc's on the encrypt key self.check_determinant() # validate the determinant of the encryption key SCREAMING_SNAKE_CASE : List[str] = encrypt_key.shape[0] def _lowerCAmelCase ( self : int , _SCREAMING_SNAKE_CASE : Tuple ) -> List[Any]: """simple docstring""" return self.key_string.index(_SCREAMING_SNAKE_CASE ) def _lowerCAmelCase ( self : str , _SCREAMING_SNAKE_CASE : str ) -> Dict: """simple docstring""" return self.key_string[round(_SCREAMING_SNAKE_CASE )] def _lowerCAmelCase ( self : str ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE : int = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: SCREAMING_SNAKE_CASE : Union[str, Any] = det % len(self.key_string ) SCREAMING_SNAKE_CASE : Dict = len(self.key_string ) if greatest_common_divisor(_SCREAMING_SNAKE_CASE , len(self.key_string ) ) != 1: SCREAMING_SNAKE_CASE : List[Any] = ( f"""determinant modular {req_l} of encryption key({det}) """ f"""is not co prime w.r.t {req_l}.\nTry another key.""" ) raise ValueError(_SCREAMING_SNAKE_CASE ) def _lowerCAmelCase ( self : Union[str, Any] , _SCREAMING_SNAKE_CASE : Any ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : str = [char for char in text.upper() if char in self.key_string] SCREAMING_SNAKE_CASE : Optional[int] = chars[-1] while len(_SCREAMING_SNAKE_CASE ) % self.break_key != 0: chars.append(_SCREAMING_SNAKE_CASE ) return "".join(_SCREAMING_SNAKE_CASE ) def _lowerCAmelCase ( self : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[Any] ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = self.process_text(text.upper() ) SCREAMING_SNAKE_CASE : Optional[int] = """""" for i in range(0 , len(_SCREAMING_SNAKE_CASE ) - self.break_key + 1 , self.break_key ): SCREAMING_SNAKE_CASE : int = text[i : i + self.break_key] SCREAMING_SNAKE_CASE : Tuple = [self.replace_letters(_SCREAMING_SNAKE_CASE ) for char in batch] SCREAMING_SNAKE_CASE : Dict = numpy.array([vec] ).T SCREAMING_SNAKE_CASE : List[str] = self.modulus(self.encrypt_key.dot(_SCREAMING_SNAKE_CASE ) ).T.tolist()[ 0 ] SCREAMING_SNAKE_CASE : Dict = """""".join( self.replace_digits(_SCREAMING_SNAKE_CASE ) for num in batch_encrypted ) encrypted += encrypted_batch return encrypted def _lowerCAmelCase ( self : Optional[int] ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: SCREAMING_SNAKE_CASE : Optional[int] = det % len(self.key_string ) SCREAMING_SNAKE_CASE : Any = None for i in range(len(self.key_string ) ): if (det * i) % len(self.key_string ) == 1: SCREAMING_SNAKE_CASE : List[str] = i break SCREAMING_SNAKE_CASE : Dict = ( det_inv * numpy.linalg.det(self.encrypt_key ) * numpy.linalg.inv(self.encrypt_key ) ) return 
self.to_int(self.modulus(_SCREAMING_SNAKE_CASE ) ) def _lowerCAmelCase ( self : List[Any] , _SCREAMING_SNAKE_CASE : int ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = self.make_decrypt_key() SCREAMING_SNAKE_CASE : Optional[int] = self.process_text(text.upper() ) SCREAMING_SNAKE_CASE : List[str] = """""" for i in range(0 , len(_SCREAMING_SNAKE_CASE ) - self.break_key + 1 , self.break_key ): SCREAMING_SNAKE_CASE : Union[str, Any] = text[i : i + self.break_key] SCREAMING_SNAKE_CASE : Union[str, Any] = [self.replace_letters(_SCREAMING_SNAKE_CASE ) for char in batch] SCREAMING_SNAKE_CASE : int = numpy.array([vec] ).T SCREAMING_SNAKE_CASE : Dict = self.modulus(decrypt_key.dot(_SCREAMING_SNAKE_CASE ) ).T.tolist()[0] SCREAMING_SNAKE_CASE : List[str] = """""".join( self.replace_digits(_SCREAMING_SNAKE_CASE ) for num in batch_decrypted ) decrypted += decrypted_batch return decrypted def __snake_case ( ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = int(input('Enter the order of the encryption key: ' ) ) SCREAMING_SNAKE_CASE : int = [] print('Enter each row of the encryption key with space separated integers' ) for _ in range(__A ): SCREAMING_SNAKE_CASE : int = [int(__A ) for x in input().split()] hill_matrix.append(__A ) SCREAMING_SNAKE_CASE : Optional[Any] = HillCipher(numpy.array(__A ) ) print('Would you like to encrypt or decrypt some text? (1 or 2)' ) SCREAMING_SNAKE_CASE : List[str] = input('\n1. Encrypt\n2. Decrypt\n' ) if option == "1": SCREAMING_SNAKE_CASE : Union[str, Any] = input('What text would you like to encrypt?: ' ) print('Your encrypted text is:' ) print(hc.encrypt(__A ) ) elif option == "2": SCREAMING_SNAKE_CASE : Tuple = input('What text would you like to decrypt?: ' ) print('Your decrypted text is:' ) print(hc.decrypt(__A ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
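One encryption round of that Hill cipher, stripped to essentials. The 36-symbol alphabet matches the class above; the 2x2 key is just an example whose determinant (7) is coprime with 36, as the determinant check requires:

import numpy as np

ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"  # 36 symbols, as above

def hill_encrypt_block(block, key):
    """Encrypt one block: c = K @ p (mod 36), then map back to symbols."""
    vec = np.array([[ALPHABET.index(ch)] for ch in block])
    enc = key @ vec % len(ALPHABET)
    return "".join(ALPHABET[int(n)] for n in enc.flatten())

key = np.array([[2, 5], [1, 6]])
print(hill_encrypt_block("HI", key))  # 'ST'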
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
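A short usage sketch for the config above (my own illustration; the override values are arbitrary):

from transformers import DecisionTransformerConfig

# Defaults match the gym Hopper setting (state_dim=17, act_dim=4); override per environment.
config = DecisionTransformerConfig(state_dim=11, act_dim=3, max_ep_len=1000)
# attribute_map routes the generic name to the GPT-2-style field n_positions:
print(config.max_position_embeddings)  # 1024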
"""simple docstring""" from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class __UpperCAmelCase (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' _UpperCamelCase : Dict = ( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) _UpperCamelCase : Any = ( { """feature-extraction""": TFMobileBertModel, """fill-mask""": TFMobileBertForMaskedLM, """question-answering""": TFMobileBertForQuestionAnswering, """text-classification""": TFMobileBertForSequenceClassification, """token-classification""": TFMobileBertForTokenClassification, """zero-shot""": TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) _UpperCamelCase : int = False _UpperCamelCase : List[Any] = False def lowerCamelCase ( self , snake_case_ , snake_case_ , snake_case_=False ): '''simple docstring''' A__ : int = super()._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ ) if return_labels: if model_class in get_values(snake_case_ ): A__ : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) return inputs_dict class __UpperCAmelCase (__SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=32 , snake_case_=2 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ): '''simple docstring''' A__ : Any = parent A__ : int = batch_size A__ : Dict = seq_length A__ : Union[str, Any] = is_training A__ : int = use_input_mask A__ : Union[str, Any] = use_token_type_ids A__ : int = use_labels A__ : Dict = vocab_size A__ : Union[str, Any] = hidden_size A__ : str = num_hidden_layers A__ : Any = num_attention_heads A__ : Optional[Any] = intermediate_size A__ : str = hidden_act A__ : List[Any] = hidden_dropout_prob A__ : Tuple = attention_probs_dropout_prob A__ : List[str] = max_position_embeddings A__ : Tuple = type_vocab_size A__ : int = type_sequence_label_size A__ : Dict = initializer_range A__ : Union[str, Any] = num_labels A__ : Optional[int] = num_choices A__ : Union[str, Any] = scope A__ : Tuple = embedding_size def lowerCamelCase ( self ): '''simple docstring''' A__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ : Optional[Any] = None if self.use_input_mask: A__ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] 
) A__ : Dict = None if self.use_token_type_ids: A__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A__ : Any = None A__ : Optional[Any] = None A__ : Union[str, Any] = None if self.use_labels: A__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A__ : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) A__ : Any = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): '''simple docstring''' A__ : List[Any] = TFMobileBertModel(config=snake_case_ ) A__ : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} A__ : Dict = model(snake_case_ ) A__ : Optional[Any] = [input_ids, input_mask] A__ : List[Any] = model(snake_case_ ) A__ : Dict = model(snake_case_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): '''simple docstring''' A__ : Optional[int] = TFMobileBertForMaskedLM(config=snake_case_ ) A__ : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} A__ : List[Any] = model(snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): '''simple docstring''' A__ : Union[str, Any] = TFMobileBertForNextSentencePrediction(config=snake_case_ ) A__ : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} A__ : Optional[Any] = model(snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def lowerCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): '''simple docstring''' A__ : List[Any] = TFMobileBertForPreTraining(config=snake_case_ ) A__ : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} A__ : Optional[int] = model(snake_case_ ) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def lowerCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): '''simple docstring''' A__ : str = self.num_labels A__ : int = TFMobileBertForSequenceClassification(config=snake_case_ ) A__ : Any = 
{"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} A__ : Union[str, Any] = model(snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): '''simple docstring''' A__ : List[str] = self.num_choices A__ : Tuple = TFMobileBertForMultipleChoice(config=snake_case_ ) A__ : Tuple = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) ) A__ : Optional[int] = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) ) A__ : Optional[int] = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) ) A__ : Any = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } A__ : Dict = model(snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): '''simple docstring''' A__ : Optional[int] = self.num_labels A__ : Dict = TFMobileBertForTokenClassification(config=snake_case_ ) A__ : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} A__ : Union[str, Any] = model(snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): '''simple docstring''' A__ : List[Any] = TFMobileBertForQuestionAnswering(config=snake_case_ ) A__ : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} A__ : Optional[int] = model(snake_case_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase ( self ): '''simple docstring''' A__ : Union[str, Any] = self.prepare_config_and_inputs() ( A__ ) : Tuple = config_and_inputs A__ : Optional[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict def lowerCamelCase ( self ): '''simple docstring''' A__ : int = TFMobileBertModelTest.TFMobileBertModelTester(self ) A__ : Union[str, Any] = ConfigTester(self , config_class=snake_case_ , hidden_size=37 ) def lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase ( self ): '''simple docstring''' A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*snake_case_ ) def lowerCamelCase ( self ): '''simple docstring''' A__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*snake_case_ ) def lowerCamelCase ( self ): '''simple docstring''' A__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*snake_case_ ) def lowerCamelCase ( self ): '''simple docstring''' A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*snake_case_ ) def lowerCamelCase ( self ): '''simple docstring''' A__ : List[Any] 
= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*snake_case_ ) def lowerCamelCase ( self ): '''simple docstring''' A__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*snake_case_ ) def lowerCamelCase ( self ): '''simple docstring''' A__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*snake_case_ ) def lowerCamelCase ( self ): '''simple docstring''' A__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*snake_case_ ) @slow def lowerCamelCase ( self ): '''simple docstring''' for model_name in ["google/mobilebert-uncased"]: A__ : Tuple = TFMobileBertModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) @require_tf class __UpperCAmelCase (unittest.TestCase ): '''simple docstring''' @slow def lowerCamelCase ( self ): '''simple docstring''' A__ : Union[str, Any] = TFMobileBertForPreTraining.from_pretrained("""google/mobilebert-uncased""" ) A__ : Any = tf.constant([[0, 1, 2, 3, 4, 5]] ) A__ : List[str] = model(snake_case_ )[0] A__ : Tuple = [1, 6, 30_522] self.assertEqual(output.shape , snake_case_ ) A__ : List[str] = tf.constant( [ [ [-4.5_91_95_47, -9.24_82_95, -9.64_52_56], [-6.7_30_61_75, -6.44_02_84, -6.6_05_28_37], [-7.2_74_35_06, -6.7_84_79_15, -6.02_46_73], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , snake_case_ , atol=1E-4 )
import os
import warnings
from typing import List, Optional

from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig


logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)

        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    # Naive O(n^2) scan: for each element, look right until a bigger one appears.
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    # Same O(n^2) idea, written with enumerate and slicing.
    result = []
    for i, outer in enumerate(arr):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    # O(n) monotonic-stack solution: scan right-to-left, popping values <= current.
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "     next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
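A small worked trace of the monotonic-stack version (my own illustrative input): scanning right-to-left, each value is pushed and popped at most once, which is why the whole pass is O(n) despite the inner while loop.

example = [2, 7, 3, 5, 4, 6, 8]
print(next_greatest_element(example))  # [7, 8, 5, 6, 6, 8, -1]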
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union

import numpy as np


def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """Decode an audio payload to mono float32 PCM at `sampling_rate` using ffmpeg."""
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio


def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """Raw microphone reader via ffmpeg, yielding byte chunks of `chunk_length_s` seconds."""
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item


def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Yield overlapping numpy audio chunks from the microphone, suitable for streaming ASR."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item


def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Re-chunk a byte iterator into windows of `chunk_len` bytes that overlap by `stride`."""
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item


def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Run ffmpeg and read its stdout in `buflen`-byte reads until EOF."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
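A small offline sketch of chunk_bytes_iter above (my own example values, no ffmpeg needed): a single 8-byte payload with chunk_len=4 and a 1-byte stride on each side yields overlapping windows plus a final remainder.

stream = iter([b"abcdefgh"])
for item in chunk_bytes_iter(stream, chunk_len=4, stride=(1, 1)):
    print(item["raw"], item["stride"])
# b"abcd" (0, 1), then b"cdef" (1, 1), b"efgh" (1, 1), and the tail b"gh" (1, 0)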
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}


class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # keep the backend normalizer in sync with the requested options
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue_model_parallelism.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
    ]
)
class MultiNodeModelParallelTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
"""simple docstring""" import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import ( SeqaSeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) UpperCAmelCase_ : Union[str, Any] = getLogger(__name__) def _lowerCAmelCase(a : Tuple , a : Union[str, Any] , a : Any , a : int = 8 , a : int = 1024 , a : Union[str, Any]="val" , a : List[Any]=None , a : int=False , a : Optional[Any]="summarization" , a : Any=None , a : List[Any]=1 , a : Optional[Any] = None , a : Union[str, Any]="" , **a : Optional[int] , ) -> int: _SCREAMING_SNAKE_CASE =str(a ) assert local_rank is not None torch.distributed.init_process_group(backend='''nccl''' , rank=a ) _SCREAMING_SNAKE_CASE =Path(a ) _SCREAMING_SNAKE_CASE =save_dir.joinpath(f"""rank_{local_rank}_output.json""" ) torch.cuda.set_device(a ) _SCREAMING_SNAKE_CASE =AutoModelForSeqaSeqLM.from_pretrained(a ).cuda() if fpaa: _SCREAMING_SNAKE_CASE =model.half() # determine if we need to increase num_beams use_task_specific_params(a , a ) # update config with task specific params _SCREAMING_SNAKE_CASE =generate_kwargs.pop('''num_beams''' , model.config.num_beams ) # AttributeError risk? if num_return_sequences > num_beams: _SCREAMING_SNAKE_CASE =num_return_sequences _SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(a ) logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type. if max_source_length is None: _SCREAMING_SNAKE_CASE =tokenizer.model_max_length if prefix is None: _SCREAMING_SNAKE_CASE =prefix or getattr(model.config , '''prefix''' , '''''' ) or """""" _SCREAMING_SNAKE_CASE =SeqaSeqDataset( a , a , a , max_target_length=1024 , type_path=a , n_obs=a , prefix=a , **a , ) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. 
_SCREAMING_SNAKE_CASE =ds.make_sortish_sampler(a , distributed=a , add_extra_examples=a , shuffle=a ) _SCREAMING_SNAKE_CASE =DataLoader(a , sampler=a , batch_size=a , collate_fn=ds.collate_fn ) _SCREAMING_SNAKE_CASE =[] for batch in tqdm(a ): _SCREAMING_SNAKE_CASE =model.generate( input_ids=batch['''input_ids'''].to(model.device ) , attention_mask=batch['''attention_mask'''].to(model.device ) , num_return_sequences=a , num_beams=a , **a , ) _SCREAMING_SNAKE_CASE =tokenizer.batch_decode(a , skip_special_tokens=a , clean_up_tokenization_spaces=a ) _SCREAMING_SNAKE_CASE =batch["""ids"""] if num_return_sequences > 1: _SCREAMING_SNAKE_CASE =chunks(a , a ) # batch size chunks, each of size num_return_seq for i, pred in enumerate(a ): results.append({'''pred''': pred, '''id''': ids[i].item()} ) save_json(a , a ) return results, sampler.num_replicas def _lowerCAmelCase() -> int: _SCREAMING_SNAKE_CASE =argparse.ArgumentParser( epilog='''Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate''' ) parser.add_argument('''--data_dir''' , type=a , help='''like cnn_dm/test.source''' ) parser.add_argument( '''--model_name''' , type=a , help='''like facebook/bart-large-cnn,t5-base, etc.''' , default='''sshleifer/distilbart-xsum-12-3''' , ) parser.add_argument('''--save_dir''' , type=a , help='''where to save''' , default='''tmp_gen''' ) parser.add_argument('''--max_source_length''' , type=a , default=a ) parser.add_argument( '''--type_path''' , type=a , default='''test''' , help='''which subset to evaluate typically train/val/test''' ) parser.add_argument('''--task''' , type=a , default='''summarization''' , help='''used for task_specific_params + metrics''' ) parser.add_argument('''--bs''' , type=a , default=8 , required=a , help='''batch size''' ) parser.add_argument( '''--local_rank''' , type=a , default=-1 , required=a , help='''should be passed by distributed.launch''' ) parser.add_argument( '''--n_obs''' , type=a , default=a , required=a , help='''How many observations. Defaults to all.''' ) parser.add_argument( '''--num_return_sequences''' , type=a , default=1 , required=a , help='''How many sequences to return''' ) parser.add_argument( '''--sync_timeout''' , type=a , default=600 , required=a , help='''How long should master process wait for other processes to finish.''' , ) parser.add_argument('''--src_lang''' , type=a , default=a , required=a ) parser.add_argument('''--tgt_lang''' , type=a , default=a , required=a ) parser.add_argument( '''--prefix''' , type=a , required=a , default=a , help='''will be added to the begininng of src examples''' ) parser.add_argument('''--fp16''' , action='''store_true''' ) parser.add_argument('''--debug''' , action='''store_true''' ) _SCREAMING_SNAKE_CASE =time.time() _SCREAMING_SNAKE_CASE =parser.parse_known_args() _SCREAMING_SNAKE_CASE =parse_numeric_n_bool_cl_kwargs(a ) if generate_kwargs and args.local_rank <= 0: print(f"""parsed the following generate kwargs: {generate_kwargs}""" ) _SCREAMING_SNAKE_CASE =Path(args.save_dir + '''_tmp''' ) Path(a ).mkdir(exist_ok=a ) # this handles locking. _SCREAMING_SNAKE_CASE =list(json_save_dir.glob('''rank_*.json''' ) ) if intermediate_files: raise ValueError(f"""Found files at {json_save_dir} please move or remove them.""" ) # In theory, a node could finish and save before another node hits this. If this happens, we can address later. 
_SCREAMING_SNAKE_CASE ={} if args.src_lang is not None: _SCREAMING_SNAKE_CASE =args.src_lang if args.tgt_lang is not None: _SCREAMING_SNAKE_CASE =args.tgt_lang Path(args.save_dir ).mkdir(exist_ok=a ) _SCREAMING_SNAKE_CASE =eval_data_dir( args.data_dir , a , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=a , **a , ) if args.local_rank <= 0: _SCREAMING_SNAKE_CASE =Path(args.save_dir ) save_dir.mkdir(exist_ok=a ) _SCREAMING_SNAKE_CASE =gather_results_from_each_node(a , a , args.sync_timeout ) _SCREAMING_SNAKE_CASE =combine_partial_results(a ) if args.num_return_sequences > 1: _SCREAMING_SNAKE_CASE =save_dir.joinpath('''pseudolabel_results.json''' ) print(f"""Saving aggregated results at {save_path}, intermediate in {json_save_dir}/""" ) save_json(a , a ) return _SCREAMING_SNAKE_CASE =Path(args.data_dir ).joinpath(args.type_path + '''.target''' ) with open(a ) as f: _SCREAMING_SNAKE_CASE =[x.rstrip() for x in f.readlines()][: len(a )] # Calculate metrics, save metrics, and save _generations.txt _SCREAMING_SNAKE_CASE ="""translation""" in args.task _SCREAMING_SNAKE_CASE =calculate_bleu if calc_bleu else calculate_rouge _SCREAMING_SNAKE_CASE ="""bleu""" if calc_bleu else """rouge""" _SCREAMING_SNAKE_CASE =score_fn(a , a ) _SCREAMING_SNAKE_CASE =len(a ) _SCREAMING_SNAKE_CASE =time.time() - start_time _SCREAMING_SNAKE_CASE =round(runtime / metrics['''n_obs'''] , 4 ) _SCREAMING_SNAKE_CASE =num_replicas # TODO(@stas00): add whatever metadata to metrics _SCREAMING_SNAKE_CASE =save_dir.joinpath(f"""{args.type_path}_{metric_name}.json""" ) save_json(a , a , indent=a ) print(a ) write_txt_file(a , save_dir.joinpath(f"""{args.type_path}_generations.txt""" ) ) if args.debug: write_txt_file(a , save_dir.joinpath(f"""{args.type_path}.target""" ) ) else: shutil.rmtree(a ) def _lowerCAmelCase(a : Union[str, Any] ) -> str: _SCREAMING_SNAKE_CASE =[] for partial_result in partial_results: records.extend(a ) _SCREAMING_SNAKE_CASE =sorted(a , key=lambda a : x["id"] ) _SCREAMING_SNAKE_CASE =[x["""pred"""] for x in records] return preds def _lowerCAmelCase(a : str , a : Optional[Any] , a : Any ) -> str: # WAIT FOR lots of .json files _SCREAMING_SNAKE_CASE =time.time() logger.info('''waiting for all nodes to finish''' ) _SCREAMING_SNAKE_CASE =None while (time.time() - start_wait) < timeout: _SCREAMING_SNAKE_CASE =list(save_dir.glob('''rank_*.json''' ) ) if len(a ) < num_replicas: continue try: # make sure all json files are fully saved _SCREAMING_SNAKE_CASE =lmap(a , a ) return json_data except JSONDecodeError: continue else: raise TimeoutError('''Rank 0 gave up on waiting for other processes''' ) # Unreachable if __name__ == "__main__": # Usage for MT: run_generate()
from __future__ import annotations


def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(n: int = 1000000) -> int:
    """Project Euler 36: sum all numbers below n that are palindromic in base 10 and base 2."""
    total = 0

    for i in range(1, n):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
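A quick check with a small bound (my own illustrative value): below 20, only the odd digits qualify, e.g. 5 is 101 in binary and 9 is 1001, while 11 fails because 1011 is not a binary palindrome.

print(solution(20))  # 1 + 3 + 5 + 7 + 9 = 25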
"""simple docstring""" from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD


torch.set_grad_enabled(False)


def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ]
        )

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def remove_projection_head(state_dict):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
"""simple docstring""" from pickle import UnpicklingError import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict from ..utils import logging __magic_name__ : Union[str, Any] = logging.get_logger(__name__) def a_ ( lowercase__ :List[Any], lowercase__ :Dict ): try: with open(lowercase__, """rb""" ) as flax_state_f: __lowerCamelCase = from_bytes(lowercase__, flax_state_f.read() ) except UnpicklingError as e: try: with open(lowercase__ ) as f: if f.read().startswith("""version""" ): raise OSError( """You seem to have cloned a repository without having git-lfs installed. Please""" """ install git-lfs and run `git lfs install` followed by `git lfs pull` in the""" """ folder you cloned.""" ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(f'Unable to convert {model_file} to Flax deserializable object. ' ) return load_flax_weights_in_pytorch_model(lowercase__, lowercase__ ) def a_ ( lowercase__ :Optional[Any], lowercase__ :List[Any] ): try: import torch # noqa: F401 except ImportError: logger.error( """Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see""" """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation""" """ instructions.""" ) raise # check if we have bf16 weights __lowerCamelCase = flatten_dict(jax.tree_util.tree_map(lambda lowercase__ : x.dtype == jnp.bfloataa, lowercase__ ) ).values() if any(lowercase__ ): # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( """Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """ """before loading those in PyTorch model.""" ) __lowerCamelCase = jax.tree_util.tree_map( lambda lowercase__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params, lowercase__ ) __lowerCamelCase = """""" __lowerCamelCase = flatten_dict(lowercase__, sep=""".""" ) __lowerCamelCase = pt_model.state_dict() # keep track of unexpected & missing keys __lowerCamelCase = [] __lowerCamelCase = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): __lowerCamelCase = flax_key_tuple.split(""".""" ) if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4: __lowerCamelCase = flax_key_tuple_array[:-1] + ["""weight"""] __lowerCamelCase = jnp.transpose(lowercase__, (3, 2, 0, 1) ) elif flax_key_tuple_array[-1] == "kernel": __lowerCamelCase = flax_key_tuple_array[:-1] + ["""weight"""] __lowerCamelCase = flax_tensor.T elif flax_key_tuple_array[-1] == "scale": __lowerCamelCase = flax_key_tuple_array[:-1] + ["""weight"""] if "time_embedding" not in flax_key_tuple_array: for i, flax_key_tuple_string in enumerate(lowercase__ ): __lowerCamelCase = ( flax_key_tuple_string.replace("""_0""", """.0""" ) .replace("""_1""", """.1""" ) .replace("""_2""", """.2""" ) .replace("""_3""", """.3""" ) .replace("""_4""", """.4""" ) .replace("""_5""", """.5""" ) .replace("""_6""", """.6""" ) .replace("""_7""", """.7""" ) .replace("""_8""", """.8""" ) .replace("""_9""", """.9""" ) ) __lowerCamelCase = """.""".join(lowercase__ ) if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( f'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected ' f'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' 
) else: # add weight to pytorch dict __lowerCamelCase = np.asarray(lowercase__ ) if not isinstance(lowercase__, np.ndarray ) else flax_tensor __lowerCamelCase = torch.from_numpy(lowercase__ ) # remove from missing keys missing_keys.remove(lowercase__ ) else: # weight is not expected by PyTorch model unexpected_keys.append(lowercase__ ) pt_model.load_state_dict(lowercase__ ) # re-transform missing_keys to list __lowerCamelCase = list(lowercase__ ) if len(lowercase__ ) > 0: logger.warning( """Some weights of the Flax model were not used when initializing the PyTorch model""" f' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing' f' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture' """ (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This""" f' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect' """ to be exactly identical (e.g. initializing a BertForSequenceClassification model from a""" """ FlaxBertForSequenceClassification model).""" ) if len(lowercase__ ) > 0: logger.warning( f'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly' f' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to' """ use it for predictions and inference.""" ) return pt_model
def is_palindrome(num: int) -> bool:
    """Return True if num reads the same forwards and backwards; negatives never qualify."""
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
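A few illustrative checks (my own examples): the reversal is built digit by digit, so no string conversion is needed.

assert is_palindrome(121) and is_palindrome(0) and not is_palindrome(10)
assert not is_palindrome(-121)  # negatives are rejected up front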
"""simple docstring""" import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig lowercase__ : List[str] = logging.get_logger(__name__) class _UpperCAmelCase : def __init__( self : List[Any] , lowercase_ : List[str] , lowercase_ : Dict ): snake_case_ : Any = question_encoder snake_case_ : Dict = generator snake_case_ : Tuple = self.question_encoder def _snake_case ( self : int , lowercase_ : str ): if os.path.isfile(lowercase_ ): raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file" ) os.makedirs(lowercase_ , exist_ok=lowercase_ ) snake_case_ : Any = os.path.join(lowercase_ , '''question_encoder_tokenizer''' ) snake_case_ : str = os.path.join(lowercase_ , '''generator_tokenizer''' ) self.question_encoder.save_pretrained(lowercase_ ) self.generator.save_pretrained(lowercase_ ) @classmethod def _snake_case ( cls : str , lowercase_ : Optional[Any] , **lowercase_ : Dict ): # dynamically import AutoTokenizer from ..auto.tokenization_auto import AutoTokenizer snake_case_ : Any = kwargs.pop('''config''' , lowercase_ ) if config is None: snake_case_ : Tuple = RagConfig.from_pretrained(lowercase_ ) snake_case_ : Optional[Any] = AutoTokenizer.from_pretrained( lowercase_ , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' ) snake_case_ : Any = AutoTokenizer.from_pretrained( lowercase_ , config=config.generator , subfolder='''generator_tokenizer''' ) return cls(question_encoder=lowercase_ , generator=lowercase_ ) def __call__( self : List[str] , *lowercase_ : Optional[Any] , **lowercase_ : Dict ): return self.current_tokenizer(*lowercase_ , **lowercase_ ) def _snake_case ( self : Optional[Any] , *lowercase_ : Any , **lowercase_ : Union[str, Any] ): return self.generator.batch_decode(*lowercase_ , **lowercase_ ) def _snake_case ( self : int , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[int] ): return self.generator.decode(*lowercase_ , **lowercase_ ) def _snake_case ( self : List[str] ): snake_case_ : Union[str, Any] = self.question_encoder def _snake_case ( self : Tuple ): snake_case_ : str = self.generator def _snake_case ( self : Any , lowercase_ : List[Any] , lowercase_ : Union[str, Any] = None , lowercase_ : List[Any] = None , lowercase_ : int = None , lowercase_ : Tuple = "longest" , lowercase_ : Any = None , lowercase_ : str = True , **lowercase_ : int , ): warnings.warn( '''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the ''' '''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` ''' '''context manager to prepare your targets. See the documentation of your specific tokenizer for more ''' '''details''' , lowercase_ , ) if max_length is None: snake_case_ : int = self.current_tokenizer.model_max_length snake_case_ : int = self( lowercase_ , add_special_tokens=lowercase_ , return_tensors=lowercase_ , max_length=lowercase_ , padding=lowercase_ , truncation=lowercase_ , **lowercase_ , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: snake_case_ : int = self.current_tokenizer.model_max_length snake_case_ : Dict = self( text_target=lowercase_ , add_special_tokens=lowercase_ , return_tensors=lowercase_ , padding=lowercase_ , max_length=lowercase_ , truncation=lowercase_ , **lowercase_ , ) snake_case_ : List[Any] = labels["""input_ids"""] return model_inputs
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
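# Hedged CLI sketch for the conversion script above; the script filename and
# both paths are placeholders, not taken from the original file:
#
#   python convert_xglm_original_ckpt_to_trfms.py /path/to/model.pt /path/to/output_dir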
import copy
import random

from transformers import CLIPTokenizer


class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}. Keep placeholder tokens independent."
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                # also forward prop_tokens_to_load so list inputs behave like single strings
                output.append(
                    self.replace_placeholder_tokens_in_text(
                        text[i], vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
                    )
                )
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
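# Hedged usage sketch for the multi-vector tokenizer above. The CLIP checkpoint
# name is an assumption and loading it needs network access, so the calls are
# left commented out:
#
#   tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
#   tokenizer.add_placeholder_tokens("<my-concept>", num_vec_per_token=4)
#   ids = tokenizer.encode("a photo of <my-concept>", vector_shuffle=True)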
def perfect_cube(n: int) -> bool:
    """
    Return True if `n` is a perfect cube, e.g. 27 == 3 ** 3.

    >>> perfect_cube(27)
    True
    >>> perfect_cube(4)
    False
    """
    # Round the floating-point cube root: a plain equality check fails for
    # values like 64, where 64 ** (1 / 3) evaluates to 3.9999999999999996.
    # abs() also avoids complex results for negative inputs.
    val = round(abs(n) ** (1 / 3))
    return val * val * val == abs(n)


if __name__ == "__main__":
    print(perfect_cube(27))  # True
    print(perfect_cube(4))  # False
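# Quick sanity check on a few illustrative values (not part of the original script):
for n in (1, 8, 27, 64, 100):
    print(n, perfect_cube(n))
# expected: True for 1, 8, 27 and 64; False for 100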
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor


class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs


@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )


@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
"""simple docstring""" import functools def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = len(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = len(lowerCAmelCase_ ) @functools.cache def min_distance(lowerCAmelCase_ , lowerCAmelCase_ ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa __SCREAMING_SNAKE_CASE = int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , lowerCAmelCase_ ) , 1 + min_distance(lowerCAmelCase_ , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import platform import numpy as np import psutil import torch from accelerate import __version__ as version from accelerate.commands.config import default_config_file, load_config_from_file from ..utils import is_npu_available, is_xpu_available def UpperCAmelCase__ (lowerCAmelCase_=None ): '''simple docstring''' if subparsers is not None: __SCREAMING_SNAKE_CASE = subparsers.add_parser("env" ) else: __SCREAMING_SNAKE_CASE = argparse.ArgumentParser("Accelerate env command" ) parser.add_argument( "--config_file" , default=lowerCAmelCase_ , help="The config file to use for the default values in the launching script." ) if subparsers is not None: parser.set_defaults(func=lowerCAmelCase_ ) return parser def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = torch.__version__ __SCREAMING_SNAKE_CASE = torch.cuda.is_available() __SCREAMING_SNAKE_CASE = is_xpu_available() __SCREAMING_SNAKE_CASE = is_npu_available() __SCREAMING_SNAKE_CASE = "Not found" # Get the default from the config file. if args.config_file is not None or os.path.isfile(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = load_config_from_file(args.config_file ).to_dict() __SCREAMING_SNAKE_CASE = { "`Accelerate` version": version, "Platform": platform.platform(), "Python version": platform.python_version(), "Numpy version": np.__version__, "PyTorch version (GPU?)": f"""{pt_version} ({pt_cuda_available})""", "PyTorch XPU available": str(lowerCAmelCase_ ), "PyTorch NPU available": str(lowerCAmelCase_ ), "System RAM": f"""{psutil.virtual_memory().total / 1024 ** 3:.2f} GB""", } if pt_cuda_available: __SCREAMING_SNAKE_CASE = torch.cuda.get_device_name() print("\nCopy-and-paste the text below in your GitHub issue\n" ) print("\n".join([f"""- {prop}: {val}""" for prop, val in info.items()] ) ) print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:" ) __SCREAMING_SNAKE_CASE = ( "\n".join([f"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else f"""\t{accelerate_config}""" ) print(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = accelerate_config return info def UpperCAmelCase__ (): '''simple docstring''' __SCREAMING_SNAKE_CASE = env_command_parser() __SCREAMING_SNAKE_CASE = parser.parse_args() env_command(lowerCAmelCase_ ) return 0 if __name__ == "__main__": raise SystemExit(main())
"""simple docstring""" import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin a__ : str = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''') @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( UpperCamelCase , unittest.TestCase): """simple docstring""" snake_case__ : Any = PegasusTokenizer snake_case__ : List[str] = PegasusTokenizerFast snake_case__ : Optional[Any] = True snake_case__ : Optional[int] = True def UpperCAmelCase_ ( self : Tuple ) -> List[str]: super().setUp() # We have a SentencePiece fixture for testing __SCREAMING_SNAKE_CASE = PegasusTokenizer(UpperCAmelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def UpperCAmelCase_ ( self : List[Any] ) -> List[str]: return PegasusTokenizer.from_pretrained("google/pegasus-large" ) def UpperCAmelCase_ ( self : Dict , **UpperCAmelCase__ : Union[str, Any] ) -> PegasusTokenizer: return PegasusTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Dict ) -> Union[str, Any]: return ("This is a test", "This is a test") def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = "</s>" __SCREAMING_SNAKE_CASE = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]: __SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<pad>" ) self.assertEqual(vocab_keys[1] , "</s>" ) self.assertEqual(vocab_keys[-1] , "v" ) self.assertEqual(len(UpperCAmelCase__ ) , 1_1_0_3 ) def UpperCAmelCase_ ( self : str ) -> Optional[int]: self.assertEqual(self.get_tokenizer().vocab_size , 1_1_0_3 ) def UpperCAmelCase_ ( self : str ) -> Optional[int]: __SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(self.tmpdirname ) __SCREAMING_SNAKE_CASE = ( "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important" " </s> <pad> <pad> <pad>" ) __SCREAMING_SNAKE_CASE = rust_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ).input_ids[0] __SCREAMING_SNAKE_CASE = py_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ).input_ids[0] self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Tuple ) -> Any: __SCREAMING_SNAKE_CASE = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word __SCREAMING_SNAKE_CASE = "<mask_1> To ensure a <mask_2> flow of bank resolutions." 
__SCREAMING_SNAKE_CASE = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1] __SCREAMING_SNAKE_CASE = tokenizer([raw_input_str] , return_tensors=UpperCAmelCase__ ).input_ids[0] self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Any ) -> Optional[int]: __SCREAMING_SNAKE_CASE = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_6_1_0_3 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 1_0_3 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1_0_2_4 __SCREAMING_SNAKE_CASE = "To ensure a smooth flow of bank resolutions." __SCREAMING_SNAKE_CASE = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1] __SCREAMING_SNAKE_CASE = tokenizer([raw_input_str] , return_tensors=UpperCAmelCase__ ).input_ids[0] self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def UpperCAmelCase_ ( self : Tuple ) -> Tuple: __SCREAMING_SNAKE_CASE = ["This is going to be way too long." * 1_5_0, "short example"] __SCREAMING_SNAKE_CASE = ["not super long but more than 5 tokens", "tiny"] __SCREAMING_SNAKE_CASE = self._large_tokenizer(UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , return_tensors="pt" ) __SCREAMING_SNAKE_CASE = self._large_tokenizer( text_target=UpperCAmelCase__ , max_length=5 , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , return_tensors="pt" ) assert batch.input_ids.shape == (2, 1_0_2_4) assert batch.attention_mask.shape == (2, 1_0_2_4) assert targets["input_ids"].shape == (2, 5) assert len(UpperCAmelCase__ ) == 2 # input_ids, attention_mask. 
@slow def UpperCAmelCase_ ( self : int ) -> List[str]: # fmt: off __SCREAMING_SNAKE_CASE = {"input_ids": [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase__ , model_name="google/bigbird-pegasus-large-arxiv" , revision="ba85d0851d708441f91440d509690f1ab6353415" , ) @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( UpperCamelCase , unittest.TestCase): """simple docstring""" snake_case__ : str = PegasusTokenizer snake_case__ : List[Any] = PegasusTokenizerFast snake_case__ : Optional[int] = True snake_case__ : List[Any] = True def UpperCAmelCase_ ( self : List[Any] ) -> List[str]: super().setUp() # We have a SentencePiece fixture for testing __SCREAMING_SNAKE_CASE = PegasusTokenizer(UpperCAmelCase__ , offset=0 , mask_token_sent=UpperCAmelCase__ , mask_token="[MASK]" ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def UpperCAmelCase_ ( self : Optional[int] ) -> int: return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv" ) def UpperCAmelCase_ ( self : Union[str, Any] , **UpperCAmelCase__ : str ) -> PegasusTokenizer: return PegasusTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Optional[int] ) -> List[str]: return ("This is a test", "This is 
a test") def UpperCAmelCase_ ( self : str ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(self.tmpdirname ) __SCREAMING_SNAKE_CASE = ( "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>" " <pad> <pad> <pad>" ) __SCREAMING_SNAKE_CASE = rust_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ).input_ids[0] __SCREAMING_SNAKE_CASE = py_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ).input_ids[0] self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) @require_torch def UpperCAmelCase_ ( self : List[Any] ) -> Tuple: __SCREAMING_SNAKE_CASE = ["This is going to be way too long." * 1_0_0_0, "short example"] __SCREAMING_SNAKE_CASE = ["not super long but more than 5 tokens", "tiny"] __SCREAMING_SNAKE_CASE = self._large_tokenizer(UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , return_tensors="pt" ) __SCREAMING_SNAKE_CASE = self._large_tokenizer( text_target=UpperCAmelCase__ , max_length=5 , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , return_tensors="pt" ) assert batch.input_ids.shape == (2, 4_0_9_6) assert batch.attention_mask.shape == (2, 4_0_9_6) assert targets["input_ids"].shape == (2, 5) assert len(UpperCAmelCase__ ) == 2 # input_ids, attention_mask. def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]: __SCREAMING_SNAKE_CASE = ( "This is an example string that is used to test the original TF implementation against the HF" " implementation" ) __SCREAMING_SNAKE_CASE = self._large_tokenizer(UpperCAmelCase__ ).input_ids self.assertListEqual( UpperCAmelCase__ , [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1] , )
"""simple docstring""" import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets a__ : int = ''' @inproceedings{xu-etal-2016-optimizing, title = {Optimizing Statistical Machine Translation for Text Simplification}, authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris}, journal = {Transactions of the Association for Computational Linguistics}, volume = {4}, year={2016}, url = {https://www.aclweb.org/anthology/Q16-1029}, pages = {401--415 }, @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' a__ : Union[str, Any] = '''\ WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU It can be used to evaluate the quality of machine-generated texts. ''' a__ : Optional[Any] = ''' Calculates sari score (between 0 and 100) given a list of source and predicted sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score. Args: sources: list of source sentences where each sentence should be a string. predictions: list of predicted sentences where each sentence should be a string. references: list of lists of reference sentences where each sentence should be a string. Returns: sari: sari score sacrebleu: sacrebleu score exact: exact score Examples: >>> sources=["About 95 species are currently accepted ."] >>> predictions=["About 95 you now get in ."] >>> references=[["About 95 species are currently known ."]] >>> wiki_split = datasets.load_metric("wiki_split") >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references) >>> print(results) {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0} ''' def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' def remove_articles(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = re.compile(R"\b(a|an|the)\b" , re.UNICODE ) return re.sub(lowerCAmelCase_ , " " , lowerCAmelCase_ ) def white_space_fix(lowerCAmelCase_ ): return " ".join(text.split() ) def remove_punc(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(lowerCAmelCase_ ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowerCAmelCase_ ) ) ) ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' return int(normalize_answer(lowerCAmelCase_ ) == normalize_answer(lowerCAmelCase_ ) ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [any(compute_exact(lowerCAmelCase_ , lowerCAmelCase_ ) for ref in refs ) for pred, refs in zip(lowerCAmelCase_ , lowerCAmelCase_ )] return (sum(lowerCAmelCase_ ) / len(lowerCAmelCase_ )) * 100 def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [rgram for rgrams in rgramslist for rgram in rgrams] __SCREAMING_SNAKE_CASE = Counter(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = Counter(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = Counter() for sgram, scount in sgramcounter.items(): __SCREAMING_SNAKE_CASE = 
scount * numref __SCREAMING_SNAKE_CASE = Counter(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = Counter() for cgram, ccount in cgramcounter.items(): __SCREAMING_SNAKE_CASE = ccount * numref # KEEP __SCREAMING_SNAKE_CASE = sgramcounter_rep & cgramcounter_rep __SCREAMING_SNAKE_CASE = keepgramcounter_rep & rgramcounter __SCREAMING_SNAKE_CASE = sgramcounter_rep & rgramcounter __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = 1 if len(lowerCAmelCase_ ) > 0: __SCREAMING_SNAKE_CASE = keeptmpscorea / len(lowerCAmelCase_ ) if len(lowerCAmelCase_ ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) __SCREAMING_SNAKE_CASE = keeptmpscorea / sum(keepgramcounterall_rep.values() ) __SCREAMING_SNAKE_CASE = 0 if keepscore_precision > 0 or keepscore_recall > 0: __SCREAMING_SNAKE_CASE = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION __SCREAMING_SNAKE_CASE = sgramcounter_rep - cgramcounter_rep __SCREAMING_SNAKE_CASE = delgramcounter_rep - rgramcounter __SCREAMING_SNAKE_CASE = sgramcounter_rep - rgramcounter __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. __SCREAMING_SNAKE_CASE = 1 if len(lowerCAmelCase_ ) > 0: __SCREAMING_SNAKE_CASE = deltmpscorea / len(lowerCAmelCase_ ) # ADDITION __SCREAMING_SNAKE_CASE = set(lowerCAmelCase_ ) - set(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = set(lowerCAmelCase_ ) & set(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = set(lowerCAmelCase_ ) - set(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
__SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = 1 if len(lowerCAmelCase_ ) > 0: __SCREAMING_SNAKE_CASE = addtmpscore / len(lowerCAmelCase_ ) if len(lowerCAmelCase_ ) > 0: __SCREAMING_SNAKE_CASE = addtmpscore / len(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = 0 if addscore_precision > 0 or addscore_recall > 0: __SCREAMING_SNAKE_CASE = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = len(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = ssent.split(" " ) __SCREAMING_SNAKE_CASE = csent.split(" " ) __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] for rsent in rsents: __SCREAMING_SNAKE_CASE = rsent.split(" " ) __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] ragramslist.append(lowerCAmelCase_ ) for i in range(0 , len(lowerCAmelCase_ ) - 1 ): if i < len(lowerCAmelCase_ ) - 1: __SCREAMING_SNAKE_CASE = ragrams[i] + " " + ragrams[i + 1] ragrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 2: __SCREAMING_SNAKE_CASE = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] ragrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 3: __SCREAMING_SNAKE_CASE = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] + " " + ragrams[i + 3] ragrams.append(lowerCAmelCase_ ) ragramslist.append(lowerCAmelCase_ ) ragramslist.append(lowerCAmelCase_ ) ragramslist.append(lowerCAmelCase_ ) for i in range(0 , len(lowerCAmelCase_ ) - 1 ): if i < len(lowerCAmelCase_ ) - 1: __SCREAMING_SNAKE_CASE = sagrams[i] + " " + sagrams[i + 1] sagrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 2: __SCREAMING_SNAKE_CASE = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] sagrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 3: __SCREAMING_SNAKE_CASE = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] + " " + sagrams[i + 3] sagrams.append(lowerCAmelCase_ ) for i in range(0 , len(lowerCAmelCase_ ) - 1 ): if i < len(lowerCAmelCase_ ) - 1: __SCREAMING_SNAKE_CASE = cagrams[i] + " " + cagrams[i + 1] cagrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 2: __SCREAMING_SNAKE_CASE = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] cagrams.append(lowerCAmelCase_ ) if i < len(lowerCAmelCase_ ) - 3: __SCREAMING_SNAKE_CASE = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] + " " + cagrams[i + 3] cagrams.append(lowerCAmelCase_ ) ((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = sum([keepascore, keepascore, keepascore, keepascore] ) / 4 __SCREAMING_SNAKE_CASE = sum([delascore, 
delascore, delascore, delascore] ) / 4 __SCREAMING_SNAKE_CASE = sum([addascore, addascore, addascore, addascore] ) / 4 __SCREAMING_SNAKE_CASE = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ = True , lowerCAmelCase_ = "13a" , lowerCAmelCase_ = True ): '''simple docstring''' if lowercase: __SCREAMING_SNAKE_CASE = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: __SCREAMING_SNAKE_CASE = sacrebleu.metrics.bleu._get_tokenizer(lowerCAmelCase_ )()(lowerCAmelCase_ ) else: __SCREAMING_SNAKE_CASE = sacrebleu.TOKENIZERS[tokenizer]()(lowerCAmelCase_ ) elif tokenizer == "moses": __SCREAMING_SNAKE_CASE = sacremoses.MosesTokenizer().tokenize(lowerCAmelCase_ , return_str=lowerCAmelCase_ , escape=lowerCAmelCase_ ) elif tokenizer == "penn": __SCREAMING_SNAKE_CASE = sacremoses.MosesTokenizer().penn_tokenize(lowerCAmelCase_ , return_str=lowerCAmelCase_ ) else: __SCREAMING_SNAKE_CASE = sentence if not return_str: __SCREAMING_SNAKE_CASE = normalized_sent.split() return normalized_sent def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' if not (len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ) == len(lowerCAmelCase_ )): raise ValueError("Sources length must match predictions and references lengths." ) __SCREAMING_SNAKE_CASE = 0 for src, pred, refs in zip(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): sari_score += SARIsent(normalize(lowerCAmelCase_ ) , normalize(lowerCAmelCase_ ) , [normalize(lowerCAmelCase_ ) for sent in refs] ) __SCREAMING_SNAKE_CASE = sari_score / len(lowerCAmelCase_ ) return 100 * sari_score def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="exp" , lowerCAmelCase_=None , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ): '''simple docstring''' __SCREAMING_SNAKE_CASE = len(references[0] ) if any(len(lowerCAmelCase_ ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) __SCREAMING_SNAKE_CASE = [[refs[i] for refs in references] for i in range(lowerCAmelCase_ )] __SCREAMING_SNAKE_CASE = sacrebleu.corpus_bleu( lowerCAmelCase_ , lowerCAmelCase_ , smooth_method=lowerCAmelCase_ , smooth_value=lowerCAmelCase_ , force=lowerCAmelCase_ , lowercase=lowerCAmelCase_ , use_effective_order=lowerCAmelCase_ , ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class UpperCamelCase_ ( datasets.Metric): """simple docstring""" def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=[ "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py", "https://github.com/cocoxu/simplification/blob/master/SARI.py", "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py", "https://github.com/mjpost/sacreBLEU", ] , reference_urls=[ "https://www.aclweb.org/anthology/Q16-1029.pdf", "https://github.com/mjpost/sacreBLEU", "https://en.wikipedia.org/wiki/BLEU", "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213", ] , ) def 
UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] ) -> Optional[int]: __SCREAMING_SNAKE_CASE = {} result.update({"sari": compute_sari(sources=UpperCAmelCase__ , predictions=UpperCAmelCase__ , references=UpperCAmelCase__ )} ) result.update({"sacrebleu": compute_sacrebleu(predictions=UpperCAmelCase__ , references=UpperCAmelCase__ )} ) result.update({"exact": compute_em(predictions=UpperCAmelCase__ , references=UpperCAmelCase__ )} ) return result
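# Illustrative check of the exact-match helper defined above (values are made up):
print(compute_em(["The cat sat."], [["the cat sat"]]))  # 100.0 -- case, punctuation and articles are normalized away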
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available a__ : Tuple = { '''configuration_rag''': ['''RagConfig'''], '''retrieval_rag''': ['''RagRetriever'''], '''tokenization_rag''': ['''RagTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : int = [ '''RagModel''', '''RagPreTrainedModel''', '''RagSequenceForGeneration''', '''RagTokenForGeneration''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : int = [ '''TFRagModel''', '''TFRagPreTrainedModel''', '''TFRagSequenceForGeneration''', '''TFRagTokenForGeneration''', ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys a__ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" import os import tempfile import unittest from transformers import NezhaConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, ) from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ : """simple docstring""" def __init__( self : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any]=1_3 , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Tuple=9_9 , UpperCAmelCase__ : Dict=3_2 , UpperCAmelCase__ : int=5 , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : List[Any]=3_7 , UpperCAmelCase__ : int="gelu" , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : Tuple=1_2_8 , UpperCAmelCase__ : Union[str, Any]=3_2 , UpperCAmelCase__ : Any=1_6 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : str=3 , UpperCAmelCase__ : Tuple=4 , UpperCAmelCase__ : List[str]=None , ) -> Optional[int]: __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_input_mask __SCREAMING_SNAKE_CASE = use_token_type_ids __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = num_choices __SCREAMING_SNAKE_CASE = scope def UpperCAmelCase_ ( self : str ) -> Any: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = None if self.use_input_mask: __SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, 
token_labels, choice_labels def UpperCAmelCase_ ( self : Optional[int] ) -> Dict: return NezhaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , ) def UpperCAmelCase_ ( self : Optional[Any] ) -> Any: ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = self.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] ) -> Any: __SCREAMING_SNAKE_CASE = NezhaModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , ) -> Tuple: __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = NezhaModel(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , ) __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] ) -> int: __SCREAMING_SNAKE_CASE = NezhaForMaskedLM(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ 
) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any ) -> Tuple: __SCREAMING_SNAKE_CASE = NezhaForNextSentencePrediction(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] ) -> List[str]: __SCREAMING_SNAKE_CASE = NezhaForPreTraining(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , next_sentence_label=UpperCAmelCase__ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = NezhaForQuestionAnswering(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = NezhaForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] ) -> Any: __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = NezhaForTokenClassification(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict ) -> str: __SCREAMING_SNAKE_CASE = self.num_choices __SCREAMING_SNAKE_CASE = NezhaForMultipleChoice(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase_ ( self : List[str] ) -> List[str]: __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = config_and_inputs __SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase): """simple docstring""" snake_case__ : str = ( ( NezhaModel, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, ) if is_torch_available() else () ) snake_case__ : Tuple = ( { "feature-extraction": NezhaModel, "fill-mask": NezhaForMaskedLM, "question-answering": NezhaForQuestionAnswering, "text-classification": NezhaForSequenceClassification, "token-classification": NezhaForTokenClassification, "zero-shot": NezhaForSequenceClassification, } if is_torch_available() else {} ) snake_case__ : int = True def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any]=False ) -> Dict: __SCREAMING_SNAKE_CASE = super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) if return_labels: if model_class in get_values(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ ) return inputs_dict def UpperCAmelCase_ ( self : List[str] ) -> List[str]: __SCREAMING_SNAKE_CASE = NezhaModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=3_7 ) def UpperCAmelCase_ ( self : int ) -> List[Any]: self.config_tester.run_common_tests() def UpperCAmelCase_ ( self : List[str] ) -> Dict: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Tuple ) -> Dict: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*UpperCAmelCase__ ) def UpperCAmelCase_ ( 
self : Optional[Any] ) -> List[Any]: # This regression test was failing with PyTorch < 1.3 ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() __SCREAMING_SNAKE_CASE = None self.model_tester.create_and_check_model_as_decoder( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ) def UpperCAmelCase_ ( self : Optional[int] ) -> int: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : str ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Optional[int] ) -> Dict: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : int ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__ ) @slow def UpperCAmelCase_ ( self : List[Any] ) -> int: for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE = NezhaModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @slow @require_torch_gpu def UpperCAmelCase_ ( self : List[str] ) -> Optional[int]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # NezhaForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == NezhaForMultipleChoice: return __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = model_class(config=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.jit.trace( UpperCAmelCase__ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(UpperCAmelCase__ , os.path.join(UpperCAmelCase__ , "bert.pt" ) ) __SCREAMING_SNAKE_CASE = torch.jit.load(os.path.join(UpperCAmelCase__ , "bert.pt" ) , map_location=UpperCAmelCase__ ) loaded(inputs_dict["input_ids"].to(UpperCAmelCase__ ) , inputs_dict["attention_mask"].to(UpperCAmelCase__ ) ) @require_torch class UpperCamelCase_ ( unittest.TestCase): """simple docstring""" @slow def UpperCAmelCase_ ( self : List[Any] ) -> str: __SCREAMING_SNAKE_CASE = NezhaModel.from_pretrained("sijunhe/nezha-cn-base" ) __SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] ) __SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0] __SCREAMING_SNAKE_CASE = torch.Size((1, 6, 7_6_8) ) self.assertEqual(output.shape , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.tensor([[[0.0_685, 0.2_441, 0.1_102], [0.0_600, 0.1_906, 0.1_349], [0.0_221, 0.0_819, 0.0_586]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase__ , atol=1E-4 ) ) @slow def UpperCAmelCase_ ( self : Optional[Any] ) -> Any: __SCREAMING_SNAKE_CASE = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base" ) __SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] ) __SCREAMING_SNAKE_CASE = torch.tensor([[1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0] __SCREAMING_SNAKE_CASE = torch.Size((1, 6, 2_1_1_2_8) ) self.assertEqual(output.shape , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.tensor( [[-2.7_939, -1.7_902, -2.2_189], [-2.8_585, -1.8_908, -2.3_723], [-2.6_499, -1.7_750, -2.2_558]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase__ , atol=1E-4 ) )
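# A minimal sketch of the torch.jit trace/save/load round-trip that the GPU
# test above exercises. The toy module, tensor shapes, and file name are
# placeholders for illustration, not part of the original test suite.
import os
import tempfile

import torch


class TinyEncoder(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.embed = torch.nn.Embedding(100, 16)
        self.proj = torch.nn.Linear(16, 2)

    def forward(self, input_ids, attention_mask):
        hidden = self.embed(input_ids) * attention_mask.unsqueeze(-1)
        return self.proj(hidden.mean(dim=1))


model = TinyEncoder().eval()
input_ids = torch.randint(0, 100, (1, 6))
attention_mask = torch.ones_like(input_ids)

# Trace on CPU, serialize, reload, and check the round-trip is lossless.
traced = torch.jit.trace(model, (input_ids, attention_mask))
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "traced.pt")
    torch.jit.save(traced, path)
    loaded = torch.jit.load(path, map_location="cpu")
    expected = model(input_ids, attention_mask)
    assert torch.allclose(loaded(input_ids, attention_mask), expected, atol=1e-6)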
"""simple docstring""" import operator as op a__ : Dict = '''scaler.pt''' a__ : Any = '''pytorch_model''' a__ : int = '''random_states''' a__ : List[str] = '''optimizer''' a__ : Tuple = '''scheduler''' a__ : Optional[Any] = '''pytorch_model.bin''' a__ : Optional[Any] = '''pytorch_model.bin.index.json''' a__ : Dict = '''model.safetensors''' a__ : str = '''model.safetensors.index.json''' a__ : List[str] = '''1.10.2''' a__ : str = '''py38''' a__ : Any = '''4.17.0''' a__ : Optional[Any] = ['''ml.p3.16xlarge''', '''ml.p3dn.24xlarge''', '''ml.p4dn.24xlarge'''] a__ : List[str] = ['''FULL_SHARD''', '''SHARD_GRAD_OP''', '''NO_SHARD''', '''HYBRID_SHARD''', '''HYBRID_SHARD_ZERO2'''] a__ : List[str] = ['''TRANSFORMER_BASED_WRAP''', '''SIZE_BASED_WRAP''', '''NO_WRAP'''] a__ : Tuple = ['''BACKWARD_PRE''', '''BACKWARD_POST''', '''NO_PREFETCH'''] a__ : Union[str, Any] = ['''FULL_STATE_DICT''', '''LOCAL_STATE_DICT''', '''SHARDED_STATE_DICT'''] a__ : int = '''2.0.1''' a__ : List[Any] = ['''pdsh''', '''standard''', '''openmpi''', '''mvapich'''] a__ : int = ['''default''', '''reduce-overhead''', '''max-autotune'''] a__ : Union[str, Any] = {'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt} # These are the args for `torch.distributed.launch` for pytorch < 1.9 a__ : Any = [ '''nnodes''', '''nproc_per_node''', '''rdzv_backend''', '''rdzv_endpoint''', '''rdzv_id''', '''rdzv_conf''', '''standalone''', '''max_restarts''', '''monitor_interval''', '''start_method''', '''role''', '''module''', '''m''', '''no_python''', '''run_path''', '''log_dir''', '''r''', '''redirects''', '''t''', '''tee''', '''node_rank''', '''master_addr''', '''master_port''', ] a__ : List[str] = ['''DEEPSPEED''', '''MULTI_GPU''', '''FSDP''', '''MEGATRON_LM'''] a__ : str = ['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP''']
"""simple docstring""" import os def UpperCAmelCase__ (): '''simple docstring''' with open(os.path.dirname(lowerCAmelCase_ ) + "/p022_names.txt" ) as file: __SCREAMING_SNAKE_CASE = str(file.readlines()[0] ) __SCREAMING_SNAKE_CASE = names.replace("\"" , "" ).split("," ) names.sort() __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 for i, name in enumerate(lowerCAmelCase_ ): for letter in name: name_score += ord(lowerCAmelCase_ ) - 64 total_score += (i + 1) * name_score __SCREAMING_SNAKE_CASE = 0 return total_score if __name__ == "__main__": print(solution())
"""simple docstring""" import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase , unittest.TestCase): """simple docstring""" snake_case__ : List[Any] = AutoencoderKL snake_case__ : Optional[Any] = "sample" snake_case__ : Optional[Any] = 1E-2 @property def UpperCAmelCase_ ( self : Tuple ) -> int: __SCREAMING_SNAKE_CASE = 4 __SCREAMING_SNAKE_CASE = 3 __SCREAMING_SNAKE_CASE = (3_2, 3_2) __SCREAMING_SNAKE_CASE = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase__ ) return {"sample": image} @property def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]: return (3, 3_2, 3_2) @property def UpperCAmelCase_ ( self : Dict ) -> Union[str, Any]: return (3, 3_2, 3_2) def UpperCAmelCase_ ( self : List[str] ) -> List[str]: __SCREAMING_SNAKE_CASE = { "block_out_channels": [3_2, 6_4], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 4, } __SCREAMING_SNAKE_CASE = self.dummy_input return init_dict, inputs_dict def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]: pass def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]: pass @unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" ) def UpperCAmelCase_ ( self : str ) -> List[Any]: # enable deterministic behavior for gradient checkpointing __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.prepare_init_args_and_inputs_for_common() __SCREAMING_SNAKE_CASE = self.model_class(**UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) assert not model.is_gradient_checkpointing and model.training __SCREAMING_SNAKE_CASE = model(**UpperCAmelCase__ ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() __SCREAMING_SNAKE_CASE = torch.randn_like(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing __SCREAMING_SNAKE_CASE = self.model_class(**UpperCAmelCase__ ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(UpperCAmelCase__ ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training __SCREAMING_SNAKE_CASE = model_a(**UpperCAmelCase__ ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() __SCREAMING_SNAKE_CASE = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1E-5 ) __SCREAMING_SNAKE_CASE = dict(model.named_parameters() ) __SCREAMING_SNAKE_CASE = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) ) def UpperCAmelCase_ ( self : List[str] ) -> Any: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" ) __SCREAMING_SNAKE_CASE = model.to(UpperCAmelCase__ ) model.eval() if torch_device == "mps": __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) else: __SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 ) __SCREAMING_SNAKE_CASE = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) __SCREAMING_SNAKE_CASE = image.to(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , sample_posterior=UpperCAmelCase__ , generator=UpperCAmelCase__ ).sample __SCREAMING_SNAKE_CASE = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. 
if torch_device == "mps": __SCREAMING_SNAKE_CASE = torch.tensor( [ -4.0078E-01, -3.8323E-04, -1.2681E-01, -1.1462E-01, 2.0095E-01, 1.0893E-01, -8.8247E-02, -3.0361E-01, -9.8644E-03, ] ) elif torch_device == "cpu": __SCREAMING_SNAKE_CASE = torch.tensor( [-0.1_352, 0.0_878, 0.0_419, -0.0_818, -0.1_069, 0.0_688, -0.1_458, -0.4_446, -0.0_026] ) else: __SCREAMING_SNAKE_CASE = torch.tensor( [-0.2_421, 0.4_642, 0.2_507, -0.0_438, 0.0_682, 0.3_160, -0.2_018, -0.0_727, 0.2_485] ) self.assertTrue(torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1E-2 ) ) @slow class UpperCamelCase_ ( unittest.TestCase): """simple docstring""" def UpperCAmelCase_ ( self : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict ) -> Any: return F"""gaussian_noise_s={seed}_shape={'_'.join([str(UpperCAmelCase__ ) for s in shape] )}.npy""" def UpperCAmelCase_ ( self : Optional[int] ) -> Dict: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Tuple=0 , UpperCAmelCase__ : Optional[Any]=(4, 3, 5_1_2, 5_1_2) , UpperCAmelCase__ : Any=False ) -> List[str]: __SCREAMING_SNAKE_CASE = torch.floataa if fpaa else torch.floataa __SCREAMING_SNAKE_CASE = torch.from_numpy(load_hf_numpy(self.get_file_format(UpperCAmelCase__ , UpperCAmelCase__ ) ) ).to(UpperCAmelCase__ ).to(UpperCAmelCase__ ) return image def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Dict="CompVis/stable-diffusion-v1-4" , UpperCAmelCase__ : Optional[Any]=False ) -> Tuple: __SCREAMING_SNAKE_CASE = "fp16" if fpaa else None __SCREAMING_SNAKE_CASE = torch.floataa if fpaa else torch.floataa __SCREAMING_SNAKE_CASE = AutoencoderKL.from_pretrained( UpperCAmelCase__ , subfolder="vae" , torch_dtype=UpperCAmelCase__ , revision=UpperCAmelCase__ , ) model.to(UpperCAmelCase__ ).eval() return model def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : int=0 ) -> str: if torch_device == "mps": return torch.manual_seed(UpperCAmelCase__ ) return torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ ) @parameterized.expand( [ # fmt: off [3_3, [-0.1_603, 0.9_878, -0.0_495, -0.0_790, -0.2_709, 0.8_375, -0.2_060, -0.0_824], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]], [4_7, [-0.2_376, 0.1_168, 0.1_332, -0.4_840, -0.2_508, -0.0_791, -0.0_493, -0.4_089], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]], # fmt: on ] ) def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_generator(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , generator=UpperCAmelCase__ , sample_posterior=UpperCAmelCase__ ).sample assert sample.shape == image.shape __SCREAMING_SNAKE_CASE = sample[-1, -2:, -2:, :2].flatten().float().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=3E-3 ) @parameterized.expand( [ # fmt: off [3_3, [-0.0_513, 0.0_289, 1.3_799, 0.2_166, -0.2_573, -0.0_871, 0.5_103, -0.0_999]], [4_7, [-0.4_128, -0.1_320, -0.3_704, 0.1_965, -0.4_116, -0.2_332, -0.3_340, 0.2_247]], # fmt: on ] ) @require_torch_gpu def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int ) -> 
Union[str, Any]: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model(fpaa=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , fpaa=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_generator(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , generator=UpperCAmelCase__ , sample_posterior=UpperCAmelCase__ ).sample assert sample.shape == image.shape __SCREAMING_SNAKE_CASE = sample[-1, -2:, :2, -2:].flatten().float().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-2 ) @parameterized.expand( [ # fmt: off [3_3, [-0.1_609, 0.9_866, -0.0_487, -0.0_777, -0.2_716, 0.8_368, -0.2_055, -0.0_814], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]], [4_7, [-0.2_377, 0.1_147, 0.1_333, -0.4_841, -0.2_506, -0.0_805, -0.0_491, -0.4_085], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]], # fmt: on ] ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any ) -> Dict: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ ).sample assert sample.shape == image.shape __SCREAMING_SNAKE_CASE = sample[-1, -2:, -2:, :2].flatten().float().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=3E-3 ) @parameterized.expand( [ # fmt: off [1_3, [-0.2_051, -0.1_803, -0.2_311, -0.2_114, -0.3_292, -0.3_574, -0.2_953, -0.3_323]], [3_7, [-0.2_632, -0.2_625, -0.2_199, -0.2_741, -0.4_539, -0.4_990, -0.3_720, -0.4_925]], # fmt: on ] ) @require_torch_gpu def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int ) -> str: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , shape=(3, 4, 6_4, 6_4) ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] __SCREAMING_SNAKE_CASE = sample[-1, -2:, :2, -2:].flatten().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) @parameterized.expand( [ # fmt: off [2_7, [-0.0_369, 0.0_207, -0.0_776, -0.0_682, -0.1_747, -0.1_930, -0.1_465, -0.2_039]], [1_6, [-0.1_628, -0.2_134, -0.2_747, -0.2_642, -0.3_774, -0.4_404, -0.3_687, -0.4_277]], # fmt: on ] ) @require_torch_gpu def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any ) -> Dict: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model(fpaa=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , shape=(3, 4, 6_4, 6_4) , fpaa=UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] __SCREAMING_SNAKE_CASE = sample[-1, -2:, :2, -2:].flatten().float().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=5E-3 ) @parameterized.expand([(1_3,), (1_6,), (2_7,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." 
) def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : Union[str, Any] ) -> List[str]: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model(fpaa=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , shape=(3, 4, 6_4, 6_4) , fpaa=UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-1 ) @parameterized.expand([(1_3,), (1_6,), (3_7,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : Tuple ) -> Dict: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , shape=(3, 4, 6_4, 6_4) ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-2 ) @parameterized.expand( [ # fmt: off [3_3, [-0.3_001, 0.0_918, -2.6_984, -3.9_720, -3.2_099, -5.0_353, 1.7_338, -0.2_065, 3.4_267]], [4_7, [-1.5_030, -4.3_871, -6.0_355, -9.1_157, -1.6_661, -2.7_853, 2.1_607, -5.0_823, 2.5_633]], # fmt: on ] ) def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple ) -> Optional[int]: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_generator(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.encode(UpperCAmelCase__ ).latent_dist __SCREAMING_SNAKE_CASE = dist.sample(generator=UpperCAmelCase__ ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] __SCREAMING_SNAKE_CASE = sample[0, -1, -3:, -3:].flatten().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = 3E-3 if torch_device != "mps" else 1E-2 assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=UpperCAmelCase__ )
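# The VAE tests above seed sampling through a device-bound generator so that
# runs are reproducible, falling back to the global seed on "mps" where
# device-bound generators are unavailable. A minimal sketch of that helper
# pattern; the function name and tensor shapes are illustrative.
import torch


def make_generator(device: str, seed: int = 0) -> torch.Generator:
    if device == "mps":
        return torch.manual_seed(seed)  # mps cannot host a device-bound generator
    return torch.Generator(device=device).manual_seed(seed)


generator = make_generator("cpu", seed=33)
noise = torch.randn(1, 4, 8, 8, generator=generator)  # deterministic given the seed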
"""simple docstring""" import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = 1.5 __SCREAMING_SNAKE_CASE = int(factor * num_class_images ) __SCREAMING_SNAKE_CASE = ClipClient( url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=lowerCAmelCase_ , aesthetic_weight=0.1 ) os.makedirs(f"""{class_data_dir}/images""" , exist_ok=lowerCAmelCase_ ) if len(list(Path(f"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images: return while True: __SCREAMING_SNAKE_CASE = client.query(text=lowerCAmelCase_ ) if len(lowerCAmelCase_ ) >= factor * num_class_images or num_images > 1E4: break else: __SCREAMING_SNAKE_CASE = int(factor * num_images ) __SCREAMING_SNAKE_CASE = ClipClient( url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=lowerCAmelCase_ , aesthetic_weight=0.1 , ) __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = tqdm(desc="downloading real regularization images" , total=lowerCAmelCase_ ) with open(f"""{class_data_dir}/caption.txt""" , "w" ) as fa, open(f"""{class_data_dir}/urls.txt""" , "w" ) as fa, open( f"""{class_data_dir}/images.txt""" , "w" ) as fa: while total < num_class_images: __SCREAMING_SNAKE_CASE = class_images[count] count += 1 try: __SCREAMING_SNAKE_CASE = requests.get(images["url"] ) if img.status_code == 200: __SCREAMING_SNAKE_CASE = Image.open(BytesIO(img.content ) ) with open(f"""{class_data_dir}/images/{total}.jpg""" , "wb" ) as f: f.write(img.content ) fa.write(images["caption"] + "\n" ) fa.write(images["url"] + "\n" ) fa.write(f"""{class_data_dir}/images/{total}.jpg""" + "\n" ) total += 1 pbar.update(1 ) else: continue except Exception: continue return def UpperCAmelCase__ (): '''simple docstring''' __SCREAMING_SNAKE_CASE = argparse.ArgumentParser("" , add_help=lowerCAmelCase_ ) parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=lowerCAmelCase_ , type=lowerCAmelCase_ ) parser.add_argument("--class_data_dir" , help="path to save images" , required=lowerCAmelCase_ , type=lowerCAmelCase_ ) parser.add_argument("--num_class_images" , help="number of images to download" , default=200 , type=lowerCAmelCase_ ) return parser.parse_args() if __name__ == "__main__": a__ : Optional[Any] = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
"""simple docstring""" import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def UpperCAmelCase__ (): '''simple docstring''' __SCREAMING_SNAKE_CASE = ArgumentParser( description=( "PyTorch TPU distributed training launch " "helper utility that will spawn up " "multiple distributed processes" ) ) # Optional arguments for the launch helper parser.add_argument("--num_cores" , type=lowerCAmelCase_ , default=1 , help="Number of TPU cores to use (1 or 8)." ) # positional parser.add_argument( "training_script" , type=lowerCAmelCase_ , help=( "The full path to the single TPU training " "program/script to be launched in parallel, " "followed by all the arguments for the " "training script" ) , ) # rest from the training program parser.add_argument("training_script_args" , nargs=lowerCAmelCase_ ) return parser.parse_args() def UpperCAmelCase__ (): '''simple docstring''' __SCREAMING_SNAKE_CASE = parse_args() # Import training_script as a module. __SCREAMING_SNAKE_CASE = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) __SCREAMING_SNAKE_CASE = script_fpath.stem __SCREAMING_SNAKE_CASE = importlib.import_module(lowerCAmelCase_ ) # Patch sys.argv __SCREAMING_SNAKE_CASE = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
"""simple docstring""" import enum import warnings from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING a__ : str = logging.get_logger(__name__) class UpperCamelCase_ ( enum.Enum): """simple docstring""" snake_case__ : Optional[int] = 0 snake_case__ : Dict = 1 @add_end_docstrings(UpperCamelCase) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : Tuple = "generated" def __init__( self : Any , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : str ) -> Dict: super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ ) self.check_model_type( TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING ) def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Dict=None , **UpperCAmelCase__ : Union[str, Any] , ) -> Optional[int]: __SCREAMING_SNAKE_CASE = {} if truncation is not None: __SCREAMING_SNAKE_CASE = truncation __SCREAMING_SNAKE_CASE = generate_kwargs __SCREAMING_SNAKE_CASE = {} if return_tensors is not None and return_type is None: __SCREAMING_SNAKE_CASE = ReturnType.TENSORS if return_tensors else ReturnType.TEXT if return_type is not None: __SCREAMING_SNAKE_CASE = return_type if clean_up_tokenization_spaces is not None: __SCREAMING_SNAKE_CASE = clean_up_tokenization_spaces if stop_sequence is not None: __SCREAMING_SNAKE_CASE = self.tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) if len(UpperCAmelCase__ ) > 1: warnings.warn( "Stopping on a multiple token sequence is not yet supported on transformers. The first token of" " the stop sequence will be used as the stop sequence string in the interim." ) __SCREAMING_SNAKE_CASE = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def UpperCAmelCase_ ( self : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> List[str]: return True def UpperCAmelCase_ ( self : Any , *UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] ) -> Any: __SCREAMING_SNAKE_CASE = self.model.config.prefix if self.model.config.prefix is not None else "" if isinstance(args[0] , UpperCAmelCase__ ): if self.tokenizer.pad_token_id is None: raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input" ) __SCREAMING_SNAKE_CASE = ([prefix + arg for arg in args[0]],) __SCREAMING_SNAKE_CASE = True elif isinstance(args[0] , UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = (prefix + args[0],) __SCREAMING_SNAKE_CASE = False else: raise ValueError( F""" `args[0]`: {args[0]} have the wrong format. 
The should be either of type `str` or type `list`""" ) __SCREAMING_SNAKE_CASE = self.tokenizer(*UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , return_tensors=self.framework ) # This is produced by tokenizers but is an invalid generate kwargs if "token_type_ids" in inputs: del inputs["token_type_ids"] return inputs def __call__( self : List[str] , *UpperCAmelCase__ : Any , **UpperCAmelCase__ : Union[str, Any] ) -> List[Any]: __SCREAMING_SNAKE_CASE = super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__ ) if ( isinstance(args[0] , UpperCAmelCase__ ) and all(isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) for el in args[0] ) and all(len(UpperCAmelCase__ ) == 1 for res in result ) ): return [res[0] for res in result] return result def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int]=TruncationStrategy.DO_NOT_TRUNCATE , **UpperCAmelCase__ : int ) -> Tuple: __SCREAMING_SNAKE_CASE = self._parse_and_tokenize(UpperCAmelCase__ , truncation=UpperCAmelCase__ , **UpperCAmelCase__ ) return inputs def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : int , **UpperCAmelCase__ : Any ) -> Any: if self.framework == "pt": __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = model_inputs["input_ids"].shape elif self.framework == "tf": __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = tf.shape(model_inputs["input_ids"] ).numpy() __SCREAMING_SNAKE_CASE = generate_kwargs.get("min_length" , self.model.config.min_length ) __SCREAMING_SNAKE_CASE = generate_kwargs.get("max_length" , self.model.config.max_length ) self.check_inputs(UpperCAmelCase__ , generate_kwargs["min_length"] , generate_kwargs["max_length"] ) __SCREAMING_SNAKE_CASE = self.model.generate(**UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = output_ids.shape[0] if self.framework == "pt": __SCREAMING_SNAKE_CASE = output_ids.reshape(UpperCAmelCase__ , out_b // in_b , *output_ids.shape[1:] ) elif self.framework == "tf": __SCREAMING_SNAKE_CASE = tf.reshape(UpperCAmelCase__ , (in_b, out_b // in_b, *output_ids.shape[1:]) ) return {"output_ids": output_ids} def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict=ReturnType.TEXT , UpperCAmelCase__ : str=False ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = [] for output_ids in model_outputs["output_ids"][0]: if return_type == ReturnType.TENSORS: __SCREAMING_SNAKE_CASE = {F"""{self.return_name}_token_ids""": output_ids} elif return_type == ReturnType.TEXT: __SCREAMING_SNAKE_CASE = { F"""{self.return_name}_text""": self.tokenizer.decode( UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__ , ) } records.append(UpperCAmelCase__ ) return records @add_end_docstrings(UpperCamelCase) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : str = "summary" def __call__( self : Tuple , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Any ) -> Optional[int]: return super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> bool: if max_length < min_length: logger.warning(F"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" ) if input_length < max_length: logger.warning( F"""Your max_length is set to {max_length}, but your input_length is only {input_length}. 
Since this is """ "a summarization task, where outputs shorter than the input are typically wanted, you might " F"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" ) @add_end_docstrings(UpperCamelCase) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : str = "translation" def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> Optional[Any]: if input_length > 0.9 * max_length: logger.warning( F"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """ "increasing your max_length manually, e.g. translator('...', max_length=400)" ) return True def UpperCAmelCase_ ( self : Any , *UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int]=TruncationStrategy.DO_NOT_TRUNCATE , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : Optional[Any]=None ) -> List[Any]: if getattr(self.tokenizer , "_build_translation_inputs" , UpperCAmelCase__ ): return self.tokenizer._build_translation_inputs( *UpperCAmelCase__ , return_tensors=self.framework , truncation=UpperCAmelCase__ , src_lang=UpperCAmelCase__ , tgt_lang=UpperCAmelCase__ ) else: return super()._parse_and_tokenize(*UpperCAmelCase__ , truncation=UpperCAmelCase__ ) def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : str=None , **UpperCAmelCase__ : List[str] ) -> Any: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = super()._sanitize_parameters(**UpperCAmelCase__ ) if src_lang is not None: __SCREAMING_SNAKE_CASE = src_lang if tgt_lang is not None: __SCREAMING_SNAKE_CASE = tgt_lang if src_lang is None and tgt_lang is None: # Backward compatibility, direct arguments use is preferred. __SCREAMING_SNAKE_CASE = kwargs.get("task" , self.task ) __SCREAMING_SNAKE_CASE = task.split("_" ) if task and len(UpperCAmelCase__ ) == 4: # translation, XX, to YY __SCREAMING_SNAKE_CASE = items[1] __SCREAMING_SNAKE_CASE = items[3] return preprocess_params, forward_params, postprocess_params def __call__( self : str , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Any ) -> List[Any]: return super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__ )
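# The TranslationPipeline above falls back to parsing the task string when no
# src_lang/tgt_lang is passed: "translation_en_to_fr".split("_") yields
# ["translation", "en", "to", "fr"], so items[1] and items[3] are the source
# and target language codes. A minimal sketch of that parsing; `parse_task`
# is a hypothetical name, not part of the pipeline module.
def parse_task(task: str):
    items = task.split("_")
    if task.startswith("translation") and len(items) == 4:
        return items[1], items[3]  # (src_lang, tgt_lang)
    return None, None


assert parse_task("translation_en_to_fr") == ("en", "fr")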
"""simple docstring""" def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [] for data in source_data: for i, el in enumerate(lowerCAmelCase_ ): if len(lowerCAmelCase_ ) < i + 1: data_lists.append([] ) data_lists[i].append(float(lowerCAmelCase_ ) ) return data_lists def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [] for dlist, weight in zip(lowerCAmelCase_ , lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = min(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = max(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = [] # for weight 0 score is 1 - actual score if weight == 0: for item in dlist: try: score.append(1 - ((item - mind) / (maxd - mind)) ) except ZeroDivisionError: score.append(1 ) elif weight == 1: for item in dlist: try: score.append((item - mind) / (maxd - mind) ) except ZeroDivisionError: score.append(0 ) # weight not 0 or 1 else: __SCREAMING_SNAKE_CASE = f"""Invalid weight of {weight:f} provided""" raise ValueError(lowerCAmelCase_ ) score_lists.append(lowerCAmelCase_ ) return score_lists def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [0 for i in range(len(score_lists[0] ) )] for slist in score_lists: for j, ele in enumerate(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = final_scores[j] + ele return final_scores def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = get_data(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = calculate_each_score(lowerCAmelCase_ , lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = generate_final_scores(lowerCAmelCase_ ) # append scores to source data for i, ele in enumerate(lowerCAmelCase_ ): source_data[i].append(lowerCAmelCase_ ) return source_data
"""simple docstring""" import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase , unittest.TestCase): """simple docstring""" snake_case__ : List[Any] = AutoencoderKL snake_case__ : Optional[Any] = "sample" snake_case__ : Optional[Any] = 1E-2 @property def UpperCAmelCase_ ( self : Tuple ) -> int: __SCREAMING_SNAKE_CASE = 4 __SCREAMING_SNAKE_CASE = 3 __SCREAMING_SNAKE_CASE = (3_2, 3_2) __SCREAMING_SNAKE_CASE = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase__ ) return {"sample": image} @property def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]: return (3, 3_2, 3_2) @property def UpperCAmelCase_ ( self : Dict ) -> Union[str, Any]: return (3, 3_2, 3_2) def UpperCAmelCase_ ( self : List[str] ) -> List[str]: __SCREAMING_SNAKE_CASE = { "block_out_channels": [3_2, 6_4], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 4, } __SCREAMING_SNAKE_CASE = self.dummy_input return init_dict, inputs_dict def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]: pass def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]: pass @unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" ) def UpperCAmelCase_ ( self : str ) -> List[Any]: # enable deterministic behavior for gradient checkpointing __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.prepare_init_args_and_inputs_for_common() __SCREAMING_SNAKE_CASE = self.model_class(**UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) assert not model.is_gradient_checkpointing and model.training __SCREAMING_SNAKE_CASE = model(**UpperCAmelCase__ ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() __SCREAMING_SNAKE_CASE = torch.randn_like(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing __SCREAMING_SNAKE_CASE = self.model_class(**UpperCAmelCase__ ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(UpperCAmelCase__ ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training __SCREAMING_SNAKE_CASE = model_a(**UpperCAmelCase__ ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() __SCREAMING_SNAKE_CASE = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1E-5 ) __SCREAMING_SNAKE_CASE = dict(model.named_parameters() ) __SCREAMING_SNAKE_CASE = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) ) def UpperCAmelCase_ ( self : List[str] ) -> Any: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" ) __SCREAMING_SNAKE_CASE = model.to(UpperCAmelCase__ ) model.eval() if torch_device == "mps": __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) else: __SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 ) __SCREAMING_SNAKE_CASE = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) __SCREAMING_SNAKE_CASE = image.to(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , sample_posterior=UpperCAmelCase__ , generator=UpperCAmelCase__ ).sample __SCREAMING_SNAKE_CASE = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. 
if torch_device == "mps": __SCREAMING_SNAKE_CASE = torch.tensor( [ -4.0078E-01, -3.8323E-04, -1.2681E-01, -1.1462E-01, 2.0095E-01, 1.0893E-01, -8.8247E-02, -3.0361E-01, -9.8644E-03, ] ) elif torch_device == "cpu": __SCREAMING_SNAKE_CASE = torch.tensor( [-0.1_352, 0.0_878, 0.0_419, -0.0_818, -0.1_069, 0.0_688, -0.1_458, -0.4_446, -0.0_026] ) else: __SCREAMING_SNAKE_CASE = torch.tensor( [-0.2_421, 0.4_642, 0.2_507, -0.0_438, 0.0_682, 0.3_160, -0.2_018, -0.0_727, 0.2_485] ) self.assertTrue(torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1E-2 ) ) @slow class UpperCamelCase_ ( unittest.TestCase): """simple docstring""" def UpperCAmelCase_ ( self : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict ) -> Any: return F"""gaussian_noise_s={seed}_shape={'_'.join([str(UpperCAmelCase__ ) for s in shape] )}.npy""" def UpperCAmelCase_ ( self : Optional[int] ) -> Dict: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Tuple=0 , UpperCAmelCase__ : Optional[Any]=(4, 3, 5_1_2, 5_1_2) , UpperCAmelCase__ : Any=False ) -> List[str]: __SCREAMING_SNAKE_CASE = torch.floataa if fpaa else torch.floataa __SCREAMING_SNAKE_CASE = torch.from_numpy(load_hf_numpy(self.get_file_format(UpperCAmelCase__ , UpperCAmelCase__ ) ) ).to(UpperCAmelCase__ ).to(UpperCAmelCase__ ) return image def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Dict="CompVis/stable-diffusion-v1-4" , UpperCAmelCase__ : Optional[Any]=False ) -> Tuple: __SCREAMING_SNAKE_CASE = "fp16" if fpaa else None __SCREAMING_SNAKE_CASE = torch.floataa if fpaa else torch.floataa __SCREAMING_SNAKE_CASE = AutoencoderKL.from_pretrained( UpperCAmelCase__ , subfolder="vae" , torch_dtype=UpperCAmelCase__ , revision=UpperCAmelCase__ , ) model.to(UpperCAmelCase__ ).eval() return model def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : int=0 ) -> str: if torch_device == "mps": return torch.manual_seed(UpperCAmelCase__ ) return torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ ) @parameterized.expand( [ # fmt: off [3_3, [-0.1_603, 0.9_878, -0.0_495, -0.0_790, -0.2_709, 0.8_375, -0.2_060, -0.0_824], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]], [4_7, [-0.2_376, 0.1_168, 0.1_332, -0.4_840, -0.2_508, -0.0_791, -0.0_493, -0.4_089], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]], # fmt: on ] ) def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_generator(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , generator=UpperCAmelCase__ , sample_posterior=UpperCAmelCase__ ).sample assert sample.shape == image.shape __SCREAMING_SNAKE_CASE = sample[-1, -2:, -2:, :2].flatten().float().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=3E-3 ) @parameterized.expand( [ # fmt: off [3_3, [-0.0_513, 0.0_289, 1.3_799, 0.2_166, -0.2_573, -0.0_871, 0.5_103, -0.0_999]], [4_7, [-0.4_128, -0.1_320, -0.3_704, 0.1_965, -0.4_116, -0.2_332, -0.3_340, 0.2_247]], # fmt: on ] ) @require_torch_gpu def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int ) -> 
Union[str, Any]: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model(fpaa=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , fpaa=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_generator(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , generator=UpperCAmelCase__ , sample_posterior=UpperCAmelCase__ ).sample assert sample.shape == image.shape __SCREAMING_SNAKE_CASE = sample[-1, -2:, :2, -2:].flatten().float().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-2 ) @parameterized.expand( [ # fmt: off [3_3, [-0.1_609, 0.9_866, -0.0_487, -0.0_777, -0.2_716, 0.8_368, -0.2_055, -0.0_814], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]], [4_7, [-0.2_377, 0.1_147, 0.1_333, -0.4_841, -0.2_506, -0.0_805, -0.0_491, -0.4_085], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]], # fmt: on ] ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any ) -> Dict: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ ).sample assert sample.shape == image.shape __SCREAMING_SNAKE_CASE = sample[-1, -2:, -2:, :2].flatten().float().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=3E-3 ) @parameterized.expand( [ # fmt: off [1_3, [-0.2_051, -0.1_803, -0.2_311, -0.2_114, -0.3_292, -0.3_574, -0.2_953, -0.3_323]], [3_7, [-0.2_632, -0.2_625, -0.2_199, -0.2_741, -0.4_539, -0.4_990, -0.3_720, -0.4_925]], # fmt: on ] ) @require_torch_gpu def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int ) -> str: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , shape=(3, 4, 6_4, 6_4) ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] __SCREAMING_SNAKE_CASE = sample[-1, -2:, :2, -2:].flatten().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) @parameterized.expand( [ # fmt: off [2_7, [-0.0_369, 0.0_207, -0.0_776, -0.0_682, -0.1_747, -0.1_930, -0.1_465, -0.2_039]], [1_6, [-0.1_628, -0.2_134, -0.2_747, -0.2_642, -0.3_774, -0.4_404, -0.3_687, -0.4_277]], # fmt: on ] ) @require_torch_gpu def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any ) -> Dict: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model(fpaa=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , shape=(3, 4, 6_4, 6_4) , fpaa=UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] __SCREAMING_SNAKE_CASE = sample[-1, -2:, :2, -2:].flatten().float().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=5E-3 ) @parameterized.expand([(1_3,), (1_6,), (2_7,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." 
) def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : Union[str, Any] ) -> List[str]: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model(fpaa=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , shape=(3, 4, 6_4, 6_4) , fpaa=UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-1 ) @parameterized.expand([(1_3,), (1_6,), (3_7,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : Tuple ) -> Dict: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , shape=(3, 4, 6_4, 6_4) ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-2 ) @parameterized.expand( [ # fmt: off [3_3, [-0.3_001, 0.0_918, -2.6_984, -3.9_720, -3.2_099, -5.0_353, 1.7_338, -0.2_065, 3.4_267]], [4_7, [-1.5_030, -4.3_871, -6.0_355, -9.1_157, -1.6_661, -2.7_853, 2.1_607, -5.0_823, 2.5_633]], # fmt: on ] ) def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple ) -> Optional[int]: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_generator(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.encode(UpperCAmelCase__ ).latent_dist __SCREAMING_SNAKE_CASE = dist.sample(generator=UpperCAmelCase__ ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] __SCREAMING_SNAKE_CASE = sample[0, -1, -3:, -3:].flatten().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = 3E-3 if torch_device != "mps" else 1E-2 assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=UpperCAmelCase__ )
"""simple docstring""" from maths.is_square_free import is_square_free from maths.prime_factors import prime_factors def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = prime_factors(lowerCAmelCase_ ) if is_square_free(lowerCAmelCase_ ): return -1 if len(lowerCAmelCase_ ) % 2 else 1 return 0 if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ : """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int=1_3 , UpperCAmelCase__ : Optional[Any]=7 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Optional[int]=False , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Optional[int]=9_9 , UpperCAmelCase__ : Dict=3_2 , UpperCAmelCase__ : List[str]=5 , UpperCAmelCase__ : Optional[int]=4 , UpperCAmelCase__ : Union[str, Any]=3_7 , UpperCAmelCase__ : Tuple="gelu" , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : int=5_1_2 , UpperCAmelCase__ : List[str]=1_6 , UpperCAmelCase__ : Optional[Any]=2 , UpperCAmelCase__ : List[Any]=0.02 , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : List[Any]=None , ) -> Any: __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_input_mask __SCREAMING_SNAKE_CASE = use_token_type_ids __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = num_choices __SCREAMING_SNAKE_CASE = scope def UpperCAmelCase_ ( self : int ) -> Dict: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = None if self.use_input_mask: __SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase_ ( self : int ) -> Union[str, Any]: return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , ) def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = BioGptModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , ) -> List[Any]: __SCREAMING_SNAKE_CASE = BioGptForCausalLM(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , *UpperCAmelCase__ : Optional[Any] ) -> Tuple: __SCREAMING_SNAKE_CASE = BioGptModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() # create attention mask __SCREAMING_SNAKE_CASE = torch.ones(input_ids.shape , dtype=torch.long , device=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.seq_length // 2 __SCREAMING_SNAKE_CASE = 0 # first forward pass __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ ).to_tuple() # create hypothetical next token and extent to next_input_ids __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids __SCREAMING_SNAKE_CASE = ids_tensor((1,) , UpperCAmelCase__ ).item() + 1 __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) __SCREAMING_SNAKE_CASE = random_other_next_tokens # append to next input_ids and attn_mask __SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 ) __SCREAMING_SNAKE_CASE = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=UpperCAmelCase__ )] , dim=1 , ) # get two different outputs __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )["last_hidden_state"] __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )["last_hidden_state"] # select random slice __SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item() __SCREAMING_SNAKE_CASE = output_from_no_past[:, -1, random_slice_idx].detach() __SCREAMING_SNAKE_CASE = output_from_past[:, 
0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) ) def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , *UpperCAmelCase__ : Optional[Any] ) -> List[Any]: __SCREAMING_SNAKE_CASE = BioGptModel(config=UpperCAmelCase__ ).to(UpperCAmelCase__ ).eval() __SCREAMING_SNAKE_CASE = torch.ones(input_ids.shape , dtype=torch.long , device=UpperCAmelCase__ ) # first forward pass __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size ) __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and __SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 ) __SCREAMING_SNAKE_CASE = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )["last_hidden_state"] __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ )[ "last_hidden_state" ] # select random slice __SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item() __SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach() __SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) ) def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , *UpperCAmelCase__ : Any , UpperCAmelCase__ : int=False ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = BioGptForCausalLM(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) if gradient_checkpointing: model.gradient_checkpointing_enable() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : str , *UpperCAmelCase__ : Optional[int] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = BioGptModel(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , *UpperCAmelCase__ : Dict ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = BioGptForTokenClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = 
model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ ( self : Optional[Any] ) -> str: __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = config_and_inputs __SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase): """simple docstring""" snake_case__ : Union[str, Any] = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) snake_case__ : Optional[int] = (BioGptForCausalLM,) if is_torch_available() else () snake_case__ : Tuple = ( { "feature-extraction": BioGptModel, "text-classification": BioGptForSequenceClassification, "text-generation": BioGptForCausalLM, "token-classification": BioGptForTokenClassification, "zero-shot": BioGptForSequenceClassification, } if is_torch_available() else {} ) snake_case__ : Optional[Any] = False def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]: __SCREAMING_SNAKE_CASE = BioGptModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=3_7 ) def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]: self.config_tester.run_common_tests() def UpperCAmelCase_ ( self : List[str] ) -> List[Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : int ) -> int: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __SCREAMING_SNAKE_CASE = type self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : List[Any] ) -> Any: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*UpperCAmelCase__ , gradient_checkpointing=UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Any ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Tuple ) -> List[Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Dict ) -> Any: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*UpperCAmelCase__ ) @slow def UpperCAmelCase_ ( self : int ) -> List[str]: __SCREAMING_SNAKE_CASE = BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) model.to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = BioGptTokenizer.from_pretrained("microsoft/biogpt" ) __SCREAMING_SNAKE_CASE = "left" # Define PAD Token = EOS Token = 50256 __SCREAMING_SNAKE_CASE = tokenizer.eos_token 
__SCREAMING_SNAKE_CASE = model.config.eos_token_id # use different length sentences to test batching __SCREAMING_SNAKE_CASE = [ "Hello, my dog is a little", "Today, I", ] __SCREAMING_SNAKE_CASE = tokenizer(UpperCAmelCase__ , return_tensors="pt" , padding=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = inputs["input_ids"].to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model.generate( input_ids=UpperCAmelCase__ , attention_mask=inputs["attention_mask"].to(UpperCAmelCase__ ) , ) __SCREAMING_SNAKE_CASE = tokenizer(sentences[0] , return_tensors="pt" ).input_ids.to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model.generate(input_ids=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item() __SCREAMING_SNAKE_CASE = tokenizer(sentences[1] , return_tensors="pt" ).input_ids.to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model.generate(input_ids=UpperCAmelCase__ , max_length=model.config.max_length - num_paddings ) __SCREAMING_SNAKE_CASE = tokenizer.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.decode(output_non_padded[0] , skip_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.decode(output_padded[0] , skip_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = [ "Hello, my dog is a little bit bigger than a little bit.", "Today, I have a good idea of how to use the information", ] self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , [non_padded_sentence, padded_sentence] ) @slow def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]: for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE = BioGptModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Dict ) -> Dict: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() __SCREAMING_SNAKE_CASE = 3 __SCREAMING_SNAKE_CASE = input_dict["input_ids"] __SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __SCREAMING_SNAKE_CASE = BioGptForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase_ ( self : List[Any] ) -> str: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() __SCREAMING_SNAKE_CASE = 3 __SCREAMING_SNAKE_CASE = "multi_label_classification" __SCREAMING_SNAKE_CASE = input_dict["input_ids"] __SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) __SCREAMING_SNAKE_CASE = BioGptForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class UpperCamelCase_ ( unittest.TestCase): """simple docstring""" @slow def UpperCAmelCase_ ( self : int ) -> List[Any]: __SCREAMING_SNAKE_CASE = 
BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) __SCREAMING_SNAKE_CASE = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]] ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ )[0] __SCREAMING_SNAKE_CASE = 4_2_3_8_4 __SCREAMING_SNAKE_CASE = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.tensor( [[[-9.5_236, -9.8_918, 10.4_557], [-11.0_469, -9.6_423, 8.1_022], [-8.8_664, -7.8_826, 5.5_325]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1E-4 ) ) @slow def UpperCAmelCase_ ( self : Union[str, Any] ) -> int: __SCREAMING_SNAKE_CASE = BioGptTokenizer.from_pretrained("microsoft/biogpt" ) __SCREAMING_SNAKE_CASE = BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) model.to(UpperCAmelCase__ ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = tokenizer("COVID-19 is" , return_tensors="pt" ).to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model.generate( **UpperCAmelCase__ , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=UpperCAmelCase__ , ) __SCREAMING_SNAKE_CASE = tokenizer.decode(output_ids[0] , skip_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = ( "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the" " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and" " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK)," " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and" " more than 800,000 deaths." ) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
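# A minimal usage sketch mirroring the beam-search integration test above.
# Assumes the "microsoft/biogpt" checkpoint is reachable on the Hub.
import torch
from transformers import BioGptForCausalLM, BioGptTokenizer

tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
model.eval()

inputs = tokenizer("COVID-19 is", return_tensors="pt")
with torch.no_grad():
    output_ids = model.generate(**inputs, min_length=20, max_length=50, num_beams=5, early_stopping=True)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))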
"""simple docstring""" from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING a__ : List[Any] = logging.get_logger(__name__) @add_end_docstrings(UpperCamelCase) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def __init__( self : List[Any] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : List[Any] ) -> List[str]: super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ ) self.check_model_type(UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : List[str]=None , **UpperCAmelCase__ : List[Any] ) -> int: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = {}, {} if padding is not None: __SCREAMING_SNAKE_CASE = padding if truncation is not None: __SCREAMING_SNAKE_CASE = truncation if top_k is not None: __SCREAMING_SNAKE_CASE = top_k return preprocess_params, {}, postprocess_params def __call__( self : Any , UpperCAmelCase__ : Union["Image.Image", str] , UpperCAmelCase__ : str = None , **UpperCAmelCase__ : List[str] ) -> Any: if isinstance(UpperCAmelCase__ , (Image.Image, str) ) and isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = {"image": image, "question": question} else: __SCREAMING_SNAKE_CASE = image __SCREAMING_SNAKE_CASE = super().__call__(UpperCAmelCase__ , **UpperCAmelCase__ ) return results def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict=False , UpperCAmelCase__ : int=False ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = load_image(inputs["image"] ) __SCREAMING_SNAKE_CASE = self.tokenizer( inputs["question"] , return_tensors=self.framework , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.image_processor(images=UpperCAmelCase__ , return_tensors=self.framework ) model_inputs.update(UpperCAmelCase__ ) return model_inputs def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : Optional[Any] ) -> str: __SCREAMING_SNAKE_CASE = self.model(**UpperCAmelCase__ ) return model_outputs def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str]=5 ) -> Tuple: if top_k > self.model.config.num_labels: __SCREAMING_SNAKE_CASE = self.model.config.num_labels if self.framework == "pt": __SCREAMING_SNAKE_CASE = model_outputs.logits.sigmoid()[0] __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = probs.topk(UpperCAmelCase__ ) else: raise ValueError(F"""Unsupported framework: {self.framework}""" ) __SCREAMING_SNAKE_CASE = scores.tolist() __SCREAMING_SNAKE_CASE = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(UpperCAmelCase__ , UpperCAmelCase__ )]
"""simple docstring""" import os import pytest from attr import dataclass a__ : int = '''us-east-1''' # defaults region @dataclass class UpperCamelCase_ : """simple docstring""" snake_case__ : str snake_case__ : Optional[Any] = "arn:aws:iam::558105141721:role/sagemaker_execution_role" snake_case__ : Optional[Any] = { "task_name": "mnli", "per_device_train_batch_size": 16, "per_device_eval_batch_size": 16, "do_train": True, "do_eval": True, "do_predict": True, "output_dir": "/opt/ml/model", "overwrite_output_dir": True, "max_steps": 500, "save_steps": 5500, } snake_case__ : Tuple = {**hyperparameters, "max_steps": 1000} @property def UpperCAmelCase_ ( self : Any ) -> str: if self.framework == "pytorch": return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}, {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"}, ] else: return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"}, {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"}, ] @property def UpperCAmelCase_ ( self : int ) -> str: return F"""{self.framework}-transfromers-test""" @property def UpperCAmelCase_ ( self : List[Any] ) -> str: return F"""./tests/sagemaker/scripts/{self.framework}""" @property def UpperCAmelCase_ ( self : Any ) -> str: if self.framework == "pytorch": return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04" else: return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04" @pytest.fixture(scope="class" ) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = SageMakerTestEnvironment(framework=request.cls.framework )
"""simple docstring""" import argparse import torch from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' if openai_config_file == "": __SCREAMING_SNAKE_CASE = OpenAIGPTConfig() else: __SCREAMING_SNAKE_CASE = OpenAIGPTConfig.from_json_file(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = OpenAIGPTModel(lowerCAmelCase_ ) # Load weights from numpy load_tf_weights_in_openai_gpt(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # Save pytorch-model __SCREAMING_SNAKE_CASE = pytorch_dump_folder_path + "/" + WEIGHTS_NAME __SCREAMING_SNAKE_CASE = pytorch_dump_folder_path + "/" + CONFIG_NAME print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" ) torch.save(model.state_dict() , lowerCAmelCase_ ) print(f"""Save configuration file to {pytorch_config_dump_path}""" ) with open(lowerCAmelCase_ , "w" , encoding="utf-8" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": a__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--openai_checkpoint_folder_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--openai_config_file''', default='''''', type=str, help=( '''An optional config json file corresponding to the pre-trained OpenAI model. \n''' '''This specifies the model architecture.''' ), ) a__ : List[str] = parser.parse_args() convert_openai_checkpoint_to_pytorch( args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path )
"""simple docstring""" import warnings from ..trainer import Trainer from ..utils import logging a__ : Any = logging.get_logger(__name__) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def __init__( self : Any , UpperCAmelCase__ : Union[str, Any]=None , **UpperCAmelCase__ : Union[str, Any] ) -> Any: warnings.warn( "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` " "instead." , UpperCAmelCase__ , ) super().__init__(args=UpperCAmelCase__ , **UpperCAmelCase__ )
"""simple docstring""" from typing import Dict from .base import GenericTensor, Pipeline class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : List[str]=None , **UpperCAmelCase__ : Union[str, Any] ) -> Union[str, Any]: if tokenize_kwargs is None: __SCREAMING_SNAKE_CASE = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)" ) __SCREAMING_SNAKE_CASE = truncation __SCREAMING_SNAKE_CASE = tokenize_kwargs __SCREAMING_SNAKE_CASE = {} if return_tensors is not None: __SCREAMING_SNAKE_CASE = return_tensors return preprocess_params, {}, postprocess_params def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : Union[str, Any] ) -> Dict[str, GenericTensor]: __SCREAMING_SNAKE_CASE = self.framework __SCREAMING_SNAKE_CASE = self.tokenizer(UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ ) return model_inputs def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : Optional[Any] ) -> Optional[int]: __SCREAMING_SNAKE_CASE = self.model(**UpperCAmelCase__ ) return model_outputs def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : str=False ) -> Optional[int]: # [0] is the first available tensor, logits or last_hidden_state. if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self : Optional[Any] , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : List[Any] ) -> Dict: return super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__ )
"""simple docstring""" def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' if collection == []: return [] # get some information about the collection __SCREAMING_SNAKE_CASE = len(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = max(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = min(lowerCAmelCase_ ) # create the counting array __SCREAMING_SNAKE_CASE = coll_max + 1 - coll_min __SCREAMING_SNAKE_CASE = [0] * counting_arr_length # count how much a number appears in the collection for number in collection: counting_arr[number - coll_min] += 1 # sum each position with it's predecessors. now, counting_arr[i] tells # us how many elements <= i has in the collection for i in range(1 , lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = counting_arr[i] + counting_arr[i - 1] # create the output collection __SCREAMING_SNAKE_CASE = [0] * coll_len # place the elements in the output, respecting the original order (stable # sort) from end to begin, updating counting_arr for i in reversed(range(0 , lowerCAmelCase_ ) ): __SCREAMING_SNAKE_CASE = collection[i] counting_arr[collection[i] - coll_min] -= 1 return ordered def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' return "".join([chr(lowerCAmelCase_ ) for i in counting_sort([ord(lowerCAmelCase_ ) for c in string] )] ) if __name__ == "__main__": # Test string sort assert counting_sort_string('''thisisthestring''') == "eghhiiinrsssttt" a__ : Dict = input('''Enter numbers separated by a comma:\n''').strip() a__ : Optional[Any] = [int(item) for item in user_input.split(''',''')] print(counting_sort(unsorted))
"""simple docstring""" def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number if __name__ == "__main__": print('''Program to check whether a number is a Perfect number or not...''') a__ : List[str] = int(input('''Enter number: ''').strip()) print(F"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available a__ : Tuple = { '''configuration_rag''': ['''RagConfig'''], '''retrieval_rag''': ['''RagRetriever'''], '''tokenization_rag''': ['''RagTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : int = [ '''RagModel''', '''RagPreTrainedModel''', '''RagSequenceForGeneration''', '''RagTokenForGeneration''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : int = [ '''TFRagModel''', '''TFRagPreTrainedModel''', '''TFRagSequenceForGeneration''', '''TFRagTokenForGeneration''', ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys a__ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def __init__( self : str , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple=1_3 , UpperCAmelCase__ : Union[str, Any]=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : List[Any]=False , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : Dict=9_9 , UpperCAmelCase__ : List[str]=0 , UpperCAmelCase__ : int=3_2 , UpperCAmelCase__ : Tuple=5 , UpperCAmelCase__ : Dict=4 , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : List[Any]=5_1_2 , UpperCAmelCase__ : Any=1_2 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : Union[str, Any]=0.02 , UpperCAmelCase__ : str=3 , UpperCAmelCase__ : List[str]=4 , UpperCAmelCase__ : Union[str, Any]="last" , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : int=None , ) -> Dict: __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_input_lengths __SCREAMING_SNAKE_CASE = use_token_type_ids __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = gelu_activation __SCREAMING_SNAKE_CASE = sinusoidal_embeddings __SCREAMING_SNAKE_CASE = causal __SCREAMING_SNAKE_CASE = asm __SCREAMING_SNAKE_CASE = n_langs __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = n_special __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = num_choices __SCREAMING_SNAKE_CASE = summary_type __SCREAMING_SNAKE_CASE = use_proj __SCREAMING_SNAKE_CASE = scope def UpperCAmelCase_ ( self : Optional[int] ) -> int: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE = None if self.use_input_lengths: __SCREAMING_SNAKE_CASE = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length __SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None 
__SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , 2 ).float() __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def UpperCAmelCase_ ( self : Dict ) -> Any: return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , ) -> str: __SCREAMING_SNAKE_CASE = FlaubertModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , lengths=UpperCAmelCase__ , langs=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , langs=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : str , ) -> Optional[int]: __SCREAMING_SNAKE_CASE = FlaubertWithLMHeadModel(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] , ) -> str: __SCREAMING_SNAKE_CASE = FlaubertForQuestionAnsweringSimple(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : 
Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] , ) -> str: __SCREAMING_SNAKE_CASE = FlaubertForQuestionAnswering(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , cls_index=UpperCAmelCase__ , is_impossible=UpperCAmelCase__ , p_mask=UpperCAmelCase__ , ) __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , cls_index=UpperCAmelCase__ , is_impossible=UpperCAmelCase__ , ) ((__SCREAMING_SNAKE_CASE) , ) = result_with_labels.to_tuple() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ ) ((__SCREAMING_SNAKE_CASE) , ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , ) -> Dict: __SCREAMING_SNAKE_CASE = FlaubertForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , ) -> Optional[int]: __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = FlaubertForTokenClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , ) -> List[Any]: __SCREAMING_SNAKE_CASE = self.num_choices __SCREAMING_SNAKE_CASE = FlaubertForMultipleChoice(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() 
__SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase_ ( self : int ) -> str: __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = config_and_inputs __SCREAMING_SNAKE_CASE = { "input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths, "attention_mask": input_mask, } return config, inputs_dict @require_torch class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase , unittest.TestCase): """simple docstring""" snake_case__ : Union[str, Any] = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) snake_case__ : Any = ( { "feature-extraction": FlaubertModel, "fill-mask": FlaubertWithLMHeadModel, "question-answering": FlaubertForQuestionAnsweringSimple, "text-classification": FlaubertForSequenceClassification, "token-classification": FlaubertForTokenClassification, "zero-shot": FlaubertForSequenceClassification, } if is_torch_available() else {} ) def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str ) -> List[Any]: if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any]=False ) -> Dict: __SCREAMING_SNAKE_CASE = super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": __SCREAMING_SNAKE_CASE = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ ) return inputs_dict def UpperCAmelCase_ ( self : int ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = FlaubertModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase__ , emb_dim=3_7 ) def UpperCAmelCase_ ( self : List[str] ) -> Dict: self.config_tester.run_common_tests() def UpperCAmelCase_ ( self : Dict ) -> str: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Dict ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Tuple ) -> Tuple: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Dict ) -> int: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict: __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*UpperCAmelCase__ ) @slow def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]: for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE = FlaubertModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @slow @require_torch_gpu def UpperCAmelCase_ ( self : Dict ) -> Tuple: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == FlaubertForMultipleChoice: return __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = model_class(config=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.jit.trace( UpperCAmelCase__ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(UpperCAmelCase__ , os.path.join(UpperCAmelCase__ , "traced_model.pt" ) ) __SCREAMING_SNAKE_CASE = torch.jit.load(os.path.join(UpperCAmelCase__ , "traced_model.pt" ) , map_location=UpperCAmelCase__ ) loaded(inputs_dict["input_ids"].to(UpperCAmelCase__ ) , inputs_dict["attention_mask"].to(UpperCAmelCase__ ) ) @require_torch class UpperCamelCase_ ( unittest.TestCase): """simple docstring""" @slow def UpperCAmelCase_ ( self : Tuple ) -> int: __SCREAMING_SNAKE_CASE = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" ) __SCREAMING_SNAKE_CASE = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ )[0] __SCREAMING_SNAKE_CASE = torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.tensor( [[[-2.6_251, -1.4_298, -0.0_227], [-2.8_510, -1.6_387, 0.2_258], [-2.8_114, -1.1_832, -0.3_066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1E-4 ) )
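# A short usage sketch matching the integration test above; assumes the
# "flaubert/flaubert_base_cased" checkpoint is available.
import torch
from transformers import FlaubertModel, FlaubertTokenizer

tokenizer = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_cased")
model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")

inputs = tokenizer("Le chat mange une pomme.", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # torch.Size([1, seq_len, 768])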
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a__ : List[str] = logging.get_logger(__name__) a__ : str = { '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''', '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''', '''xlm-roberta-large-finetuned-conll02-dutch''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll02-spanish''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll03-english''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll03-german''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json''' ), } class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : Any = "xlm-roberta" def __init__( self : int , UpperCAmelCase__ : Union[str, Any]=3_0_5_2_2 , UpperCAmelCase__ : Optional[Any]=7_6_8 , UpperCAmelCase__ : Optional[int]=1_2 , UpperCAmelCase__ : Tuple=1_2 , UpperCAmelCase__ : str=3_0_7_2 , UpperCAmelCase__ : int="gelu" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : Optional[int]=5_1_2 , UpperCAmelCase__ : Dict=2 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : Optional[int]=1E-12 , UpperCAmelCase__ : Any=1 , UpperCAmelCase__ : Any=0 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : Any="absolute" , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[Any]=None , **UpperCAmelCase__ : int , ) -> Tuple: super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = position_embedding_type __SCREAMING_SNAKE_CASE = use_cache __SCREAMING_SNAKE_CASE = classifier_dropout class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" @property def UpperCAmelCase_ ( self : List[str] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": __SCREAMING_SNAKE_CASE = {0: "batch", 1: "choice", 2: "sequence"} else: __SCREAMING_SNAKE_CASE = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
"""simple docstring""" import baseaa def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' return baseaa.baaencode(string.encode("utf-8" ) ) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' return baseaa.baadecode(lowerCAmelCase_ ).decode("utf-8" ) if __name__ == "__main__": a__ : Optional[int] = '''Hello World!''' a__ : Tuple = baseaa_encode(test) print(encoded) a__ : Union[str, Any] = baseaa_decode(encoded) print(decoded)
"""simple docstring""" import argparse import os import re import torch from flax.traverse_util import flatten_dict from tax import checkpoints from transformers import ( AutoTokenizer, PixaStructConfig, PixaStructForConditionalGeneration, PixaStructImageProcessor, PixaStructProcessor, PixaStructTextConfig, PixaStructVisionConfig, ) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = checkpoints.load_tax_checkpoint(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = flatten_dict(lowerCAmelCase_ ) return flax_params def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = { "token_embedder": "embeddings", "encoder_norm": "layernorm", "kernel": "weight", ".out": ".output", "scale": "weight", "embedders_0.pos_embedding": "row_embedder.weight", "embedders_1.pos_embedding": "column_embedder.weight", } __SCREAMING_SNAKE_CASE = { "query": "attention.query", "key": "attention.key", "value": "attention.value", "output.dense": "output", "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o", "pre_self_attention_layer_norm": "self_attention.layer_norm", "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm", "mlp.": "mlp.DenseReluDense.", "pre_mlp_layer_norm": "mlp.layer_norm", "self_attention.o": "self_attention.attention.o", "decoder.embeddings.embedding": "decoder.embed_tokens.weight", "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight", "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight", "decoder.logits_dense.weight": "decoder.lm_head.weight", } for key in flax_dict.keys(): if "target" in key: # remove the first prefix from the key __SCREAMING_SNAKE_CASE = ".".join(key[1:] ) # rename the key for old, new in CONVERSION_MAPPING.items(): __SCREAMING_SNAKE_CASE = new_key.replace(lowerCAmelCase_ , lowerCAmelCase_ ) if "decoder" in new_key: for old, new in DECODER_CONVERSION_MAPPING.items(): __SCREAMING_SNAKE_CASE = new_key.replace(lowerCAmelCase_ , lowerCAmelCase_ ) if "layers" in new_key and "decoder" not in new_key: # use regex to replace the layer number __SCREAMING_SNAKE_CASE = re.sub(R"layers_(\d+)" , R"layer.\1" , lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = new_key.replace("encoder" , "encoder.encoder" ) elif "layers" in new_key and "decoder" in new_key: # use regex to replace the layer number __SCREAMING_SNAKE_CASE = re.sub(R"layers_(\d+)" , R"layer.\1" , lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = flax_dict[key] __SCREAMING_SNAKE_CASE = {} # convert converted_dict into torch format for key in converted_dict.keys(): if ("embed_tokens" not in key) and ("embedder" not in key): __SCREAMING_SNAKE_CASE = torch.from_numpy(converted_dict[key].T ) else: __SCREAMING_SNAKE_CASE = torch.from_numpy(converted_dict[key] ) return converted_torch_dict def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=False ): '''simple docstring''' __SCREAMING_SNAKE_CASE = get_flax_param(lowerCAmelCase_ ) if not use_large: __SCREAMING_SNAKE_CASE = PixaStructVisionConfig() __SCREAMING_SNAKE_CASE = PixaStructTextConfig() else: __SCREAMING_SNAKE_CASE = PixaStructVisionConfig( hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 ) __SCREAMING_SNAKE_CASE = PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 ) __SCREAMING_SNAKE_CASE = PixaStructConfig( vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , 
is_vqa=lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = PixaStructForConditionalGeneration(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = rename_and_convert_flax_params(lowerCAmelCase_ ) model.load_state_dict(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer" ) __SCREAMING_SNAKE_CASE = PixaStructImageProcessor() __SCREAMING_SNAKE_CASE = PixaStructProcessor(image_processor=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ ) if use_large: __SCREAMING_SNAKE_CASE = 4096 __SCREAMING_SNAKE_CASE = True # mkdir if needed os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ ) model.save_pretrained(lowerCAmelCase_ ) processor.save_pretrained(lowerCAmelCase_ ) print("Model saved in {}".format(lowerCAmelCase_ ) ) if __name__ == "__main__": a__ : Optional[int] = argparse.ArgumentParser() parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''') parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''') parser.add_argument('''--is_vqa''', action='''store_true''', help='''Use large model.''') a__ : Optional[Any] = parser.parse_args() convert_pixastruct_original_pytorch_checkpoint_to_hf( args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large )
"""simple docstring""" def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 0 , lowerCAmelCase_ = 0 ): '''simple docstring''' __SCREAMING_SNAKE_CASE = right or len(lowerCAmelCase_ ) - 1 if left > right: return -1 elif list_data[left] == key: return left elif list_data[right] == key: return right else: return search(lowerCAmelCase_ , lowerCAmelCase_ , left + 1 , right - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## a__ : Optional[Any] = 1_6 a__ : str = 3_2 def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ = 16 ): '''simple docstring''' __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("bert-base-cased" ) __SCREAMING_SNAKE_CASE = load_dataset("glue" , "mrpc" ) def tokenize_function(lowerCAmelCase_ ): # max_length=None => use the model max length (it's actually the default) __SCREAMING_SNAKE_CASE = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __SCREAMING_SNAKE_CASE = datasets.map( lowerCAmelCase_ , batched=lowerCAmelCase_ , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __SCREAMING_SNAKE_CASE = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(lowerCAmelCase_ ): # On TPU it's best to pad everything to the same length or training will be very slow. __SCREAMING_SNAKE_CASE = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __SCREAMING_SNAKE_CASE = 16 elif accelerator.mixed_precision != "no": __SCREAMING_SNAKE_CASE = 8 else: __SCREAMING_SNAKE_CASE = None return tokenizer.pad( lowerCAmelCase_ , padding="longest" , max_length=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_tensors="pt" , ) # Instantiate dataloaders. 
__SCREAMING_SNAKE_CASE = DataLoader( tokenized_datasets["train"] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = DataLoader( tokenized_datasets["validation"] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders a__ : List[Any] = mocked_dataloaders # noqa: F811 def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowerCAmelCase_ ) == "1": __SCREAMING_SNAKE_CASE = 2 # Initialize accelerator __SCREAMING_SNAKE_CASE = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __SCREAMING_SNAKE_CASE = config["lr"] __SCREAMING_SNAKE_CASE = int(config["num_epochs"] ) __SCREAMING_SNAKE_CASE = int(config["seed"] ) __SCREAMING_SNAKE_CASE = int(config["batch_size"] ) __SCREAMING_SNAKE_CASE = evaluate.load("glue" , "mrpc" ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=lowerCAmelCase_ ) def inner_training_loop(lowerCAmelCase_ ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(lowerCAmelCase_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowerCAmelCase_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __SCREAMING_SNAKE_CASE = model.to(accelerator.device ) # Instantiate optimizer __SCREAMING_SNAKE_CASE = AdamW(params=model.parameters() , lr=lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_dataloaders(lowerCAmelCase_ , lowerCAmelCase_ ) # Instantiate scheduler __SCREAMING_SNAKE_CASE = get_linear_schedule_with_warmup( optimizer=lowerCAmelCase_ , num_warmup_steps=100 , num_training_steps=(len(lowerCAmelCase_ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = accelerator.prepare( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # Now we train the model for epoch in range(lowerCAmelCase_ ): model.train() for step, batch in enumerate(lowerCAmelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) __SCREAMING_SNAKE_CASE = model(**lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = outputs.loss accelerator.backward(lowerCAmelCase_ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowerCAmelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(**lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1 ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=lowerCAmelCase_ , references=lowerCAmelCase_ , ) __SCREAMING_SNAKE_CASE = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , lowerCAmelCase_ ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def UpperCAmelCase__ (): '''simple docstring''' __SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=lowerCAmelCase_ , default=lowerCAmelCase_ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) __SCREAMING_SNAKE_CASE = parser.parse_args() __SCREAMING_SNAKE_CASE = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(lowerCAmelCase_ , lowerCAmelCase_ ) if __name__ == "__main__": main()
682
1
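The sample above leans on `find_executable_batch_size` to keep CUDA out-of-memory failures from killing the run. Below is a minimal, self-contained sketch of the retry pattern it applies; the decorator name and the OOM test are illustrative, not the accelerate internals, and the real utility also frees accelerator state between attempts.

import functools

def retry_with_smaller_batches(starting_batch_size: int = 128):
    """Call function(batch_size, ...), halving the batch size on OOM-style errors."""
    def decorator(function):
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    return function(batch_size, *args, **kwargs)
                except RuntimeError as exc:
                    if "out of memory" not in str(exc).lower():
                        raise  # unrelated failure: do not retry
                    batch_size //= 2  # halve and try again
            raise RuntimeError("No executable batch size found (reached zero).")
        return wrapper
    return decorator

@retry_with_smaller_batches(starting_batch_size=64)
def inner_training_loop(batch_size):
    print(f"trying batch_size={batch_size}")
    # build the dataloaders and run the training loop here, as in the sample above

inner_training_loop()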
"""simple docstring""" import argparse import shutil from pathlib import Path from tqdm import tqdm from transformers import AutoTokenizer def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1024 ): '''simple docstring''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = [], [] __SCREAMING_SNAKE_CASE = list(zip(lowerCAmelCase_ , lowerCAmelCase_ ) ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = sorted_examples[0] def is_too_big(lowerCAmelCase_ ): return tok(lowerCAmelCase_ , return_tensors="pt" ).input_ids.shape[1] > max_tokens for src, tgt in tqdm(sorted_examples[1:] ): __SCREAMING_SNAKE_CASE = new_src + " " + src __SCREAMING_SNAKE_CASE = new_tgt + " " + tgt if is_too_big(lowerCAmelCase_ ) or is_too_big(lowerCAmelCase_ ): # cant fit, finalize example finished_src.append(lowerCAmelCase_ ) finished_tgt.append(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = src, tgt else: # can fit, keep adding __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = cand_src, cand_tgt # cleanup if new_src: assert new_tgt finished_src.append(lowerCAmelCase_ ) finished_tgt.append(lowerCAmelCase_ ) return finished_src, finished_tgt def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = Path(lowerCAmelCase_ ) save_path.mkdir(exist_ok=lowerCAmelCase_ ) for split in ["train"]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = data_dir / f"""{split}.source""", data_dir / f"""{split}.target""" __SCREAMING_SNAKE_CASE = [x.rstrip() for x in Path(lowerCAmelCase_ ).open().readlines()] __SCREAMING_SNAKE_CASE = [x.rstrip() for x in Path(lowerCAmelCase_ ).open().readlines()] __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = pack_examples(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) print(f"""packed {split} split from {len(lowerCAmelCase_ )} examples -> {len(lowerCAmelCase_ )}.""" ) Path(save_path / f"""{split}.source""" ).open("w" ).write("\n".join(lowerCAmelCase_ ) ) Path(save_path / f"""{split}.target""" ).open("w" ).write("\n".join(lowerCAmelCase_ ) ) for split in ["val", "test"]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = data_dir / f"""{split}.source""", data_dir / f"""{split}.target""" shutil.copyfile(lowerCAmelCase_ , save_path / f"""{split}.source""" ) shutil.copyfile(lowerCAmelCase_ , save_path / f"""{split}.target""" ) def UpperCAmelCase__ (): '''simple docstring''' __SCREAMING_SNAKE_CASE = argparse.ArgumentParser() parser.add_argument("--tok_name" , type=lowerCAmelCase_ , help="like facebook/bart-large-cnn,t5-base, etc." ) parser.add_argument("--max_seq_len" , type=lowerCAmelCase_ , default=128 ) parser.add_argument("--data_dir" , type=lowerCAmelCase_ ) parser.add_argument("--save_path" , type=lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = parser.parse_args() __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(args.tok_name ) return pack_data_dir(lowerCAmelCase_ , Path(args.data_dir ) , args.max_seq_len , args.save_path ) if __name__ == "__main__": packer_cli()
682
"""simple docstring""" from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig a__ : Dict = logging.get_logger(__name__) # General docstring a__ : str = '''RegNetConfig''' # Base docstring a__ : List[str] = '''facebook/regnet-y-040''' a__ : int = [1, 1_0_8_8, 7, 7] # Image classification docstring a__ : int = '''facebook/regnet-y-040''' a__ : str = '''tabby, tabby cat''' a__ : Optional[Any] = [ '''facebook/regnet-y-040''', # See all regnet models at https://huggingface.co/models?filter=regnet ] class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[str] = "relu" , **UpperCAmelCase__ : Tuple , ) -> Any: super().__init__(**UpperCAmelCase__ ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb __SCREAMING_SNAKE_CASE = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) __SCREAMING_SNAKE_CASE = tf.keras.layers.ConvaD( filters=UpperCAmelCase__ , kernel_size=UpperCAmelCase__ , strides=UpperCAmelCase__ , padding="VALID" , groups=UpperCAmelCase__ , use_bias=UpperCAmelCase__ , name="convolution" , ) __SCREAMING_SNAKE_CASE = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" ) __SCREAMING_SNAKE_CASE = ACTaFN[activation] if activation is not None else tf.identity def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : Optional[int] ) -> Tuple: __SCREAMING_SNAKE_CASE = self.convolution(self.padding(UpperCAmelCase__ ) ) __SCREAMING_SNAKE_CASE = self.normalization(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.activation(UpperCAmelCase__ ) return hidden_state class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : List[Any] , UpperCAmelCase__ : RegNetConfig , **UpperCAmelCase__ : Optional[Any] ) -> List[Any]: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = config.num_channels __SCREAMING_SNAKE_CASE = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , ) def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : List[Any] ) -> Optional[int]: __SCREAMING_SNAKE_CASE = shape_list(UpperCAmelCase__ )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) __SCREAMING_SNAKE_CASE = tf.transpose(UpperCAmelCase__ , perm=(0, 2, 3, 1) ) __SCREAMING_SNAKE_CASE = self.embedder(UpperCAmelCase__ ) return hidden_state class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 2 , **UpperCAmelCase__ : int ) -> str: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tf.keras.layers.ConvaD( filters=UpperCAmelCase__ , kernel_size=1 , strides=UpperCAmelCase__ , use_bias=UpperCAmelCase__ , name="convolution" ) __SCREAMING_SNAKE_CASE = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" ) def UpperCAmelCase_ ( self : List[str] , UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : bool = False ) -> tf.Tensor: return self.normalization(self.convolution(UpperCAmelCase__ ) , training=UpperCAmelCase__ ) class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , **UpperCAmelCase__ : int ) -> Tuple: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tf.keras.layers.GlobalAveragePoolingaD(keepdims=UpperCAmelCase__ , name="pooler" ) __SCREAMING_SNAKE_CASE = [ tf.keras.layers.ConvaD(filters=UpperCAmelCase__ , kernel_size=1 , activation="relu" , name="attention.0" ), tf.keras.layers.ConvaD(filters=UpperCAmelCase__ , kernel_size=1 , activation="sigmoid" , name="attention.2" ), ] def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : List[str] ) -> Any: # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] __SCREAMING_SNAKE_CASE = self.pooler(UpperCAmelCase__ ) for layer_module in self.attention: __SCREAMING_SNAKE_CASE = layer_module(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = hidden_state * pooled return hidden_state class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : Dict , UpperCAmelCase__ : RegNetConfig , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 1 , **UpperCAmelCase__ : int ) -> str: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = in_channels != out_channels or stride != 1 __SCREAMING_SNAKE_CASE = max(1 , out_channels // config.groups_width ) __SCREAMING_SNAKE_CASE = ( TFRegNetShortCut(UpperCAmelCase__ , stride=UpperCAmelCase__ , name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" , name="shortcut" ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
__SCREAMING_SNAKE_CASE = [ TFRegNetConvLayer(UpperCAmelCase__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ), TFRegNetConvLayer( UpperCAmelCase__ , stride=UpperCAmelCase__ , groups=UpperCAmelCase__ , activation=config.hidden_act , name="layer.1" ), TFRegNetConvLayer(UpperCAmelCase__ , kernel_size=1 , activation=UpperCAmelCase__ , name="layer.2" ), ] __SCREAMING_SNAKE_CASE = ACTaFN[config.hidden_act] def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : str ) -> Any: __SCREAMING_SNAKE_CASE = hidden_state for layer_module in self.layers: __SCREAMING_SNAKE_CASE = layer_module(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.shortcut(UpperCAmelCase__ ) hidden_state += residual __SCREAMING_SNAKE_CASE = self.activation(UpperCAmelCase__ ) return hidden_state class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : List[str] , UpperCAmelCase__ : RegNetConfig , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 1 , **UpperCAmelCase__ : List[Any] ) -> Any: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = in_channels != out_channels or stride != 1 __SCREAMING_SNAKE_CASE = max(1 , out_channels // config.groups_width ) __SCREAMING_SNAKE_CASE = ( TFRegNetShortCut(UpperCAmelCase__ , stride=UpperCAmelCase__ , name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" , name="shortcut" ) ) __SCREAMING_SNAKE_CASE = [ TFRegNetConvLayer(UpperCAmelCase__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ), TFRegNetConvLayer( UpperCAmelCase__ , stride=UpperCAmelCase__ , groups=UpperCAmelCase__ , activation=config.hidden_act , name="layer.1" ), TFRegNetSELayer(UpperCAmelCase__ , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ), TFRegNetConvLayer(UpperCAmelCase__ , kernel_size=1 , activation=UpperCAmelCase__ , name="layer.3" ), ] __SCREAMING_SNAKE_CASE = ACTaFN[config.hidden_act] def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : int ) -> List[Any]: __SCREAMING_SNAKE_CASE = hidden_state for layer_module in self.layers: __SCREAMING_SNAKE_CASE = layer_module(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.shortcut(UpperCAmelCase__ ) hidden_state += residual __SCREAMING_SNAKE_CASE = self.activation(UpperCAmelCase__ ) return hidden_state class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : str , UpperCAmelCase__ : RegNetConfig , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , **UpperCAmelCase__ : Optional[int] ) -> Optional[Any]: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer __SCREAMING_SNAKE_CASE = [ # downsampling is done in the first layer with stride of 2 layer(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , stride=UpperCAmelCase__ , name="layers.0" ), *[layer(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )], ] def UpperCAmelCase_ ( self : List[str] , UpperCAmelCase__ : int ) -> int: for layer_module in self.layers: __SCREAMING_SNAKE_CASE = layer_module(UpperCAmelCase__ ) return hidden_state class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" def __init__( self : Any , UpperCAmelCase__ : RegNetConfig , **UpperCAmelCase__ : Any ) -> List[str]: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = [] # based on `downsample_in_first_stage`, the first layer of the first stage may 
# or may not downsample the input
self.stages.append( TFRegNetStage( UpperCAmelCase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) ) __SCREAMING_SNAKE_CASE = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(UpperCAmelCase__ , config.depths[1:] ) ): self.stages.append(TFRegNetStage(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , depth=UpperCAmelCase__ , name=F"""stages.{i+1}""" ) ) def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = True ) -> TFBaseModelOutputWithNoAttention: __SCREAMING_SNAKE_CASE = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: __SCREAMING_SNAKE_CASE = hidden_states + (hidden_state,) __SCREAMING_SNAKE_CASE = stage_module(UpperCAmelCase__ ) if output_hidden_states: __SCREAMING_SNAKE_CASE = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=UpperCAmelCase__ , hidden_states=UpperCAmelCase__ ) @keras_serializable class UpperCamelCase_ ( tf.keras.layers.Layer): """simple docstring""" snake_case__ : Any = RegNetConfig def __init__( self : List[Any] , UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : int ) -> Tuple: super().__init__(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = config __SCREAMING_SNAKE_CASE = TFRegNetEmbeddings(UpperCAmelCase__ , name="embedder" ) __SCREAMING_SNAKE_CASE = TFRegNetEncoder(UpperCAmelCase__ , name="encoder" ) __SCREAMING_SNAKE_CASE = tf.keras.layers.GlobalAveragePoolingaD(keepdims=UpperCAmelCase__ , name="pooler" ) @unpack_inputs def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : bool = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention: __SCREAMING_SNAKE_CASE = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict __SCREAMING_SNAKE_CASE = self.embedder(UpperCAmelCase__ , training=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.encoder( UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , training=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = encoder_outputs[0] __SCREAMING_SNAKE_CASE = self.pooler(UpperCAmelCase__ ) # Change to NCHW output format to have uniformity in the modules __SCREAMING_SNAKE_CASE = tf.transpose(UpperCAmelCase__ , perm=(0, 3, 1, 2) ) __SCREAMING_SNAKE_CASE = tf.transpose(UpperCAmelCase__ , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: __SCREAMING_SNAKE_CASE = tuple([tf.transpose(UpperCAmelCase__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=UpperCAmelCase__ , pooler_output=UpperCAmelCase__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : List[Any] = RegNetConfig snake_case__ : List[str] = "regnet" snake_case__ : str = "pixel_values" @property def UpperCAmelCase_ ( self :
Optional[Any] ) -> Tuple: return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )} a__ : Union[str, Any] = r''' Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. ''' a__ : Optional[int] = r''' Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. ''' @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." , UpperCamelCase , ) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase__ : RegNetConfig , *UpperCAmelCase__ : int , **UpperCAmelCase__ : Optional[int] ) -> Tuple: super().__init__(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = TFRegNetMainLayer(UpperCAmelCase__ , name="regnet" ) @unpack_inputs @add_start_docstrings_to_model_forward(UpperCAmelCase__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : tf.Tensor , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Dict=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]: __SCREAMING_SNAKE_CASE = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict __SCREAMING_SNAKE_CASE = self.regnet( pixel_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , training=UpperCAmelCase__ , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , UpperCamelCase , ) class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase): """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase__ : RegNetConfig , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Tuple ) -> Any: super().__init__(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = config.num_labels __SCREAMING_SNAKE_CASE = TFRegNetMainLayer(UpperCAmelCase__ , name="regnet" ) # classification head __SCREAMING_SNAKE_CASE = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(UpperCAmelCase__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : tf.Tensor = None , UpperCAmelCase__ : tf.Tensor = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Optional[Any]=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: __SCREAMING_SNAKE_CASE = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict __SCREAMING_SNAKE_CASE = self.regnet( UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , training=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = outputs.pooler_output if return_dict else outputs[1] __SCREAMING_SNAKE_CASE = self.classifier[0](UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.classifier[1](UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = None if labels is None else self.hf_compute_loss(labels=UpperCAmelCase__ , logits=UpperCAmelCase__ ) if not return_dict: __SCREAMING_SNAKE_CASE = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=UpperCAmelCase__ , logits=UpperCAmelCase__ , hidden_states=outputs.hidden_states )
682
1
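The packing script in this row greedily concatenates source/target pairs until a tokenized length budget would be exceeded. A self-contained sketch of that loop follows, substituting a whitespace token count for the real tokenizer; all names are illustrative.

def pack_pairs(sources, targets, max_tokens=1024):
    # Greedy packing: keep appending the next pair to the current pack until
    # either side would exceed the budget, then start a new pack.
    def too_big(text):
        return len(text.split()) > max_tokens  # stand-in for the tokenizer length

    packed_src, packed_tgt = [], []
    cur_src, cur_tgt = sources[0], targets[0]
    for src, tgt in zip(sources[1:], targets[1:]):
        cand_src, cand_tgt = cur_src + " " + src, cur_tgt + " " + tgt
        if too_big(cand_src) or too_big(cand_tgt):
            packed_src.append(cur_src)
            packed_tgt.append(cur_tgt)
            cur_src, cur_tgt = src, tgt
        else:
            cur_src, cur_tgt = cand_src, cand_tgt
    packed_src.append(cur_src)  # flush the final, partially filled pack
    packed_tgt.append(cur_tgt)
    return packed_src, packed_tgt

src, tgt = pack_pairs(["a b", "c", "d e f"], ["x", "y", "z"], max_tokens=3)
assert src == ["a b c", "d e f"] and tgt == ["x y", "z"]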
"""simple docstring""" import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, Features, Value from .base import TaskTemplate @dataclass(frozen=UpperCamelCase) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : str = field(default="automatic-speech-recognition" , metadata={"include_in_asdict_even_if_is_default": True}) snake_case__ : ClassVar[Features] = Features({"audio": Audio()}) snake_case__ : ClassVar[Features] = Features({"transcription": Value("string")}) snake_case__ : str = "audio" snake_case__ : str = "transcription" def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Union[str, Any] ) -> Tuple: if self.audio_column not in features: raise ValueError(F"""Column {self.audio_column} is not present in features.""" ) if not isinstance(features[self.audio_column] , UpperCAmelCase__ ): raise ValueError(F"""Column {self.audio_column} is not an Audio type.""" ) __SCREAMING_SNAKE_CASE = copy.deepcopy(self ) __SCREAMING_SNAKE_CASE = self.input_schema.copy() __SCREAMING_SNAKE_CASE = features[self.audio_column] __SCREAMING_SNAKE_CASE = input_schema return task_template @property def UpperCAmelCase_ ( self : List[Any] ) -> Dict[str, str]: return {self.audio_column: "audio", self.transcription_column: "transcription"}
682
"""simple docstring""" def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' if edge <= 0 or not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): raise ValueError("Length must be a positive." ) return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' if edge <= 0 or not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): raise ValueError("Length must be a positive." ) return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3) if __name__ == "__main__": import doctest doctest.testmod()
682
1
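The geometry sample in this row hard-codes the closed forms for a regular dodecahedron. A quick numeric sanity check of those formulas for a unit edge, against the standard constants (rounded here):

import math

edge = 1.0
surface_area = 3 * math.sqrt(25 + 10 * math.sqrt(5)) * edge**2
volume = (15 + 7 * math.sqrt(5)) / 4 * edge**3
assert abs(surface_area - 20.645728807) < 1e-6  # 3 * sqrt(25 + 10*sqrt(5))
assert abs(volume - 7.663118961) < 1e-6         # (15 + 7*sqrt(5)) / 4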
"""simple docstring""" import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class UpperCamelCase_ ( unittest.TestCase): """simple docstring""" @property def UpperCAmelCase_ ( self : Dict ) -> List[str]: torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = UNetaDModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , ) return model def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[int]: __SCREAMING_SNAKE_CASE = self.dummy_uncond_unet __SCREAMING_SNAKE_CASE = ScoreSdeVeScheduler() __SCREAMING_SNAKE_CASE = ScoreSdeVePipeline(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ ) sde_ve.to(UpperCAmelCase__ ) sde_ve.set_progress_bar_config(disable=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=UpperCAmelCase__ ).images __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=UpperCAmelCase__ , return_dict=UpperCAmelCase__ )[ 0 ] __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) __SCREAMING_SNAKE_CASE = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class UpperCamelCase_ ( unittest.TestCase): """simple docstring""" def UpperCAmelCase_ ( self : int ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = "google/ncsnpp-church-256" __SCREAMING_SNAKE_CASE = UNetaDModel.from_pretrained(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = ScoreSdeVeScheduler.from_pretrained(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = ScoreSdeVePipeline(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ ) sde_ve.to(UpperCAmelCase__ ) sde_ve.set_progress_bar_config(disable=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = sde_ve(num_inference_steps=1_0 , output_type="numpy" , generator=UpperCAmelCase__ ).images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 2_5_6, 2_5_6, 3) __SCREAMING_SNAKE_CASE = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
682
"""simple docstring""" import time import warnings from abc import ABC from copy import deepcopy from typing import Optional import torch from ..utils import add_start_docstrings, logging a__ : Union[str, Any] = logging.get_logger(__name__) a__ : Optional[int] = r''' Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. kwargs (`Dict[str, Any]`, *optional*): Additional stopping criteria specific kwargs. Return: `bool`. `False` indicates we should continue, `True` indicates we should stop. ''' class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" @add_start_docstrings(UpperCAmelCase__ ) def __call__( self : List[str] , UpperCAmelCase__ : torch.LongTensor , UpperCAmelCase__ : torch.FloatTensor , **UpperCAmelCase__ : List[Any] ) -> bool: raise NotImplementedError("StoppingCriteria needs to be subclassed" ) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def __init__( self : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] = None ) -> Optional[int]: __SCREAMING_SNAKE_CASE = max_length __SCREAMING_SNAKE_CASE = max_position_embeddings @add_start_docstrings(UpperCAmelCase__ ) def __call__( self : List[str] , UpperCAmelCase__ : torch.LongTensor , UpperCAmelCase__ : torch.FloatTensor , **UpperCAmelCase__ : Optional[int] ) -> bool: __SCREAMING_SNAKE_CASE = input_ids.shape[-1] __SCREAMING_SNAKE_CASE = cur_len >= self.max_length if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings: logger.warning_once( "This is a friendly reminder - the current text generation call will exceed the model's predefined " F"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """ "exceptions, performance degradation, or nothing at all." ) return is_done class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> str: warnings.warn( "The class `MaxNewTokensCriteria` is deprecated. " F"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """ "with `max_length = start_length + max_new_tokens` instead." 
, UpperCAmelCase__ , ) __SCREAMING_SNAKE_CASE = start_length __SCREAMING_SNAKE_CASE = max_new_tokens __SCREAMING_SNAKE_CASE = start_length + max_new_tokens @add_start_docstrings(UpperCAmelCase__ ) def __call__( self : Union[str, Any] , UpperCAmelCase__ : torch.LongTensor , UpperCAmelCase__ : torch.FloatTensor , **UpperCAmelCase__ : Tuple ) -> bool: return input_ids.shape[-1] >= self.max_length class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase__ : float , UpperCAmelCase__ : Optional[float] = None ) -> Dict: __SCREAMING_SNAKE_CASE = max_time __SCREAMING_SNAKE_CASE = time.time() if initial_timestamp is None else initial_timestamp @add_start_docstrings(UpperCAmelCase__ ) def __call__( self : Tuple , UpperCAmelCase__ : torch.LongTensor , UpperCAmelCase__ : torch.FloatTensor , **UpperCAmelCase__ : str ) -> bool: return time.time() - self.initial_timestamp > self.max_time class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" @add_start_docstrings(UpperCAmelCase__ ) def __call__( self : Dict , UpperCAmelCase__ : torch.LongTensor , UpperCAmelCase__ : torch.FloatTensor , **UpperCAmelCase__ : List[str] ) -> bool: return any(criteria(UpperCAmelCase__ , UpperCAmelCase__ ) for criteria in self ) @property def UpperCAmelCase_ ( self : Any ) -> Optional[int]: for stopping_criterium in self: if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): return stopping_criterium.max_length elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): return stopping_criterium.max_length return None def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = stopping_criteria.max_length __SCREAMING_SNAKE_CASE = deepcopy(lowerCAmelCase_ ) if stopping_max_length is not None and stopping_max_length != max_length: warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , lowerCAmelCase_ ) elif stopping_max_length is None: new_stopping_criteria.append(MaxLengthCriteria(max_length=lowerCAmelCase_ ) ) return new_stopping_criteria
682
1
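The stopping-criteria classes in this row plug into text generation through a criteria list. A hedged usage sketch follows, assuming the standard transformers exports for the classes defined above; the gpt2 checkpoint and prompt are purely illustrative.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, MaxLengthCriteria, StoppingCriteriaList

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tokenizer("The quick brown fox", return_tensors="pt")
# Stop as soon as the total sequence length reaches 20 tokens.
criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
with torch.no_grad():
    output_ids = model.generate(**inputs, stopping_criteria=criteria, do_sample=False)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))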
"""simple docstring""" import json import os import shutil import tempfile from unittest import TestCase from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available if is_torch_available() and is_datasets_available() and is_faiss_available(): from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.tokenization_rag import RagTokenizer @require_faiss @require_torch class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" def UpperCAmelCase_ ( self : Dict ) -> Any: __SCREAMING_SNAKE_CASE = tempfile.mkdtemp() __SCREAMING_SNAKE_CASE = 8 # DPR tok __SCREAMING_SNAKE_CASE = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , "dpr_tokenizer" ) os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = os.path.join(UpperCAmelCase__ , DPR_VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) # BART tok __SCREAMING_SNAKE_CASE = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] __SCREAMING_SNAKE_CASE = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) ) __SCREAMING_SNAKE_CASE = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] __SCREAMING_SNAKE_CASE = {"unk_token": "<unk>"} __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , "bart_tokenizer" ) os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = os.path.join(UpperCAmelCase__ , BART_VOCAB_FILES_NAMES["vocab_file"] ) __SCREAMING_SNAKE_CASE = os.path.join(UpperCAmelCase__ , BART_VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(UpperCAmelCase__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(UpperCAmelCase__ ) ) def UpperCAmelCase_ ( self : Optional[Any] ) -> DPRQuestionEncoderTokenizer: return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) ) def UpperCAmelCase_ ( self : Optional[int] ) -> BartTokenizer: return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) ) def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]: shutil.rmtree(self.tmpdirname ) @require_tokenizers def UpperCAmelCase_ ( self : int ) -> List[str]: __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , "rag_tokenizer" ) __SCREAMING_SNAKE_CASE = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() ) __SCREAMING_SNAKE_CASE = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() ) rag_config.save_pretrained(UpperCAmelCase__ ) rag_tokenizer.save_pretrained(UpperCAmelCase__ ) 
__SCREAMING_SNAKE_CASE = RagTokenizer.from_pretrained(UpperCAmelCase__ , config=UpperCAmelCase__ ) self.assertIsInstance(new_rag_tokenizer.question_encoder , UpperCAmelCase__ ) self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() ) self.assertIsInstance(new_rag_tokenizer.generator , UpperCAmelCase__ ) self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() ) @slow def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = RagTokenizer.from_pretrained("facebook/rag-token-nq" ) __SCREAMING_SNAKE_CASE = [ "who got the first nobel prize in physics", "when is the next deadpool movie being released", "which mode is used for short wave broadcast service", "who is the owner of reading football club", "when is the next scandal episode coming out", "when is the last time the philadelphia won the superbowl", "what is the most current adobe flash player version", "how many episodes are there in dragon ball z", "what is the first step in the evolution of the eye", "where is gall bladder situated in human body", "what is the main mineral in lithium batteries", "who is the president of usa right now", "where do the greasers live in the outsiders", "panda is a national animal of which country", "what is the name of manchester united stadium", ] __SCREAMING_SNAKE_CASE = tokenizer(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @slow def UpperCAmelCase_ ( self : List[Any] ) -> Dict: __SCREAMING_SNAKE_CASE = RagTokenizer.from_pretrained("facebook/rag-sequence-nq" ) __SCREAMING_SNAKE_CASE = [ "who got the first nobel prize in physics", "when is the next deadpool movie being released", "which mode is used for short wave broadcast service", "who is the owner of reading football club", "when is the next scandal episode coming out", "when is the last time the philadelphia won the superbowl", "what is the most current adobe flash player version", "how many episodes are there in dragon ball z", "what is the first step in the evolution of the eye", "where is gall bladder situated in human body", "what is the main mineral in lithium batteries", "who is the president of usa right now", "where do the greasers live in the outsiders", "panda is a national animal of which country", "what is the name of manchester united stadium", ] __SCREAMING_SNAKE_CASE = tokenizer(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ )
682
"""simple docstring""" import json import os import unittest from transformers.models.roc_bert.tokenization_roc_bert import ( VOCAB_FILES_NAMES, RoCBertBasicTokenizer, RoCBertTokenizer, RoCBertWordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class UpperCamelCase_ ( UpperCamelCase , unittest.TestCase): """simple docstring""" snake_case__ : int = RoCBertTokenizer snake_case__ : int = None snake_case__ : Optional[Any] = False snake_case__ : int = True snake_case__ : Any = filter_non_english def UpperCAmelCase_ ( self : Any ) -> Union[str, Any]: super().setUp() __SCREAMING_SNAKE_CASE = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"] __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = {} for i, value in enumerate(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = i __SCREAMING_SNAKE_CASE = i __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] ) __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer: json.dump(UpperCAmelCase__ , UpperCAmelCase__ , ensure_ascii=UpperCAmelCase__ ) with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer: json.dump(UpperCAmelCase__ , UpperCAmelCase__ , ensure_ascii=UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Any ) -> List[str]: __SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) __SCREAMING_SNAKE_CASE = tokenizer.tokenize("你好[SEP]你是谁" ) self.assertListEqual(UpperCAmelCase__ , ["你", "好", "[SEP]", "你", "是", "谁"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase__ ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase__ ) , [5, 6, 2, 5, 7, 8] ) def UpperCAmelCase_ ( self : Any ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] ) def UpperCAmelCase_ ( self : int ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def UpperCAmelCase_ ( self : Dict ) -> Dict: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] ) def UpperCAmelCase_ ( self : int ) -> Dict: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? 
" ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def UpperCAmelCase_ ( self : Any ) -> Optional[int]: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def UpperCAmelCase_ ( self : int ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def UpperCAmelCase_ ( self : int ) -> List[Any]: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def UpperCAmelCase_ ( self : Optional[int] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , never_split=["[UNK]"] ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def UpperCAmelCase_ ( self : str ) -> List[str]: __SCREAMING_SNAKE_CASE = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] __SCREAMING_SNAKE_CASE = {} for i, token in enumerate(UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = i __SCREAMING_SNAKE_CASE = RoCBertWordpieceTokenizer(vocab=UpperCAmelCase__ , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] ) self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] ) def UpperCAmelCase_ ( self : List[Any] ) -> str: self.assertTrue(_is_whitespace(" " ) ) self.assertTrue(_is_whitespace("\t" ) ) self.assertTrue(_is_whitespace("\r" ) ) self.assertTrue(_is_whitespace("\n" ) ) self.assertTrue(_is_whitespace("\u00A0" ) ) self.assertFalse(_is_whitespace("A" ) ) self.assertFalse(_is_whitespace("-" ) ) def UpperCAmelCase_ ( self : List[Any] ) -> List[str]: self.assertTrue(_is_control("\u0005" ) ) self.assertFalse(_is_control("A" ) ) self.assertFalse(_is_control(" " ) ) self.assertFalse(_is_control("\t" ) ) self.assertFalse(_is_control("\r" ) ) def UpperCAmelCase_ ( self : List[str] ) -> Tuple: self.assertTrue(_is_punctuation("-" ) ) self.assertTrue(_is_punctuation("$" ) ) self.assertTrue(_is_punctuation("`" ) ) self.assertTrue(_is_punctuation("." 
) ) self.assertFalse(_is_punctuation("A" ) ) self.assertFalse(_is_punctuation(" " ) ) def UpperCAmelCase_ ( self : int ) -> int: __SCREAMING_SNAKE_CASE = self.get_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(UpperCAmelCase__ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) if self.test_rust_tokenizer: __SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() self.assertListEqual( [rust_tokenizer.tokenize(UpperCAmelCase__ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) def UpperCAmelCase_ ( self : Tuple ) -> List[Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence.""" __SCREAMING_SNAKE_CASE = tokenizer_r.encode_plus( UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , ) __SCREAMING_SNAKE_CASE = tokenizer_r.do_lower_case if hasattr(UpperCAmelCase__ , "do_lower_case" ) else False __SCREAMING_SNAKE_CASE = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), "Allen"), ((2_1, 2_3), "##NL"), ((2_3, 2_4), "##P"), ((2_5, 3_3), "sentence"), ((3_3, 3_4), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), "allen"), ((2_1, 2_3), "##nl"), ((2_3, 2_4), "##p"), ((2_5, 3_3), "sentence"), ((3_3, 3_4), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] ) def UpperCAmelCase_ ( self : Tuple ) -> Dict: __SCREAMING_SNAKE_CASE = ["的", "人", "有"] __SCREAMING_SNAKE_CASE = "".join(UpperCAmelCase__ ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_p.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_r.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase__ ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_r.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) 
__SCREAMING_SNAKE_CASE = tokenizer_p.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase__ ) # it is expected that only the first Chinese character is not preceded by "##". __SCREAMING_SNAKE_CASE = [ F"""##{token}""" if idx != 0 else token for idx, token in enumerate(UpperCAmelCase__ ) ] self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) @slow def UpperCAmelCase_ ( self : List[Any] ) -> Tuple: __SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) __SCREAMING_SNAKE_CASE = tokenizer.encode("你好" , add_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.encode("你是谁" , add_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ ) assert encoded_sentence == [1] + text + [2] assert encoded_pair == [1] + text + [2] + text_a + [2] def UpperCAmelCase_ ( self : str ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = self.get_tokenizers(do_lower_case=UpperCAmelCase__ ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): __SCREAMING_SNAKE_CASE = "你好,你是谁" __SCREAMING_SNAKE_CASE = tokenizer.tokenize(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.prepare_for_model( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = tokenizer.encode_plus(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
682
1
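The wordpiece assertions in this row ("unwanted" -> ["un", "##want", "##ed"], "unwantedX" -> ["[UNK]"]) follow from greedy longest-match-first segmentation. A minimal sketch of that algorithm, using the toy vocabulary from the test; function and variable names are illustrative.

def wordpiece_tokenize(word, vocab, unk="[UNK]"):
    # Greedy longest-match-first: take the longest vocab piece at each position,
    # prefixing non-initial pieces with "##"; if any position fails, the whole
    # word maps to the unknown token.
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        match = None
        while start < end:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                match = piece
                break
            end -= 1
        if match is None:
            return [unk]
        pieces.append(match)
        start = end
    return pieces

vocab = {"un", "##want", "##ed", "runn", "##ing"}
assert wordpiece_tokenize("unwanted", vocab) == ["un", "##want", "##ed"]
assert wordpiece_tokenize("unwantedX", vocab) == ["[UNK]"]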
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to properly calculate the metrics on the # validation dataset when in a distributed system, and builds off the # `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## a__ : Optional[Any] = 1_6 a__ : Tuple = 3_2 def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ = 16 ): '''simple docstring''' __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("bert-base-cased" ) __SCREAMING_SNAKE_CASE = load_dataset("glue" , "mrpc" ) def tokenize_function(lowerCAmelCase_ ): # max_length=None => use the model max length (it's actually the default) __SCREAMING_SNAKE_CASE = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __SCREAMING_SNAKE_CASE = datasets.map( lowerCAmelCase_ , batched=lowerCAmelCase_ , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __SCREAMING_SNAKE_CASE = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(lowerCAmelCase_ ): # On TPU it's best to pad everything to the same length or training will be very slow. __SCREAMING_SNAKE_CASE = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __SCREAMING_SNAKE_CASE = 16 elif accelerator.mixed_precision != "no": __SCREAMING_SNAKE_CASE = 8 else: __SCREAMING_SNAKE_CASE = None return tokenizer.pad( lowerCAmelCase_ , padding="longest" , max_length=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_tensors="pt" , ) # Instantiate dataloaders. 
__SCREAMING_SNAKE_CASE = DataLoader( tokenized_datasets["train"] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = DataLoader( tokenized_datasets["validation"] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders a__ : Tuple = mocked_dataloaders # noqa: F811 def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowerCAmelCase_ ) == "1": __SCREAMING_SNAKE_CASE = 2 # Initialize accelerator __SCREAMING_SNAKE_CASE = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __SCREAMING_SNAKE_CASE = config["lr"] __SCREAMING_SNAKE_CASE = int(config["num_epochs"] ) __SCREAMING_SNAKE_CASE = int(config["seed"] ) __SCREAMING_SNAKE_CASE = int(config["batch_size"] ) __SCREAMING_SNAKE_CASE = evaluate.load("glue" , "mrpc" ) # If the batch size is too big we use gradient accumulation __SCREAMING_SNAKE_CASE = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: __SCREAMING_SNAKE_CASE = batch_size // MAX_GPU_BATCH_SIZE __SCREAMING_SNAKE_CASE = MAX_GPU_BATCH_SIZE set_seed(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_dataloaders(lowerCAmelCase_ , lowerCAmelCase_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowerCAmelCase_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __SCREAMING_SNAKE_CASE = model.to(accelerator.device ) # Instantiate optimizer __SCREAMING_SNAKE_CASE = AdamW(params=model.parameters() , lr=lowerCAmelCase_ ) # Instantiate scheduler __SCREAMING_SNAKE_CASE = get_linear_schedule_with_warmup( optimizer=lowerCAmelCase_ , num_warmup_steps=100 , num_training_steps=(len(lowerCAmelCase_ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = accelerator.prepare( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # Now we train the model for epoch in range(lowerCAmelCase_ ): model.train() for step, batch in enumerate(lowerCAmelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) __SCREAMING_SNAKE_CASE = model(**lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = outputs.loss __SCREAMING_SNAKE_CASE = loss / gradient_accumulation_steps accelerator.backward(lowerCAmelCase_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() __SCREAMING_SNAKE_CASE = 0 for step, batch in enumerate(lowerCAmelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(**lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1 ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = accelerator.gather((predictions, batch["labels"]) ) # New Code # # First we check if it's a distributed system if accelerator.use_distributed: # Then see if we're on the last batch of our eval dataloader if step == len(lowerCAmelCase_ ) - 1: # Last batch needs to be truncated on distributed systems as it contains additional samples __SCREAMING_SNAKE_CASE = predictions[: len(eval_dataloader.dataset ) - samples_seen] __SCREAMING_SNAKE_CASE = references[: len(eval_dataloader.dataset ) - samples_seen] else: # Otherwise we add the number of samples seen samples_seen += references.shape[0] # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`: # accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=lowerCAmelCase_ , references=lowerCAmelCase_ , ) __SCREAMING_SNAKE_CASE = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , lowerCAmelCase_ ) def UpperCAmelCase__ (): '''simple docstring''' __SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=lowerCAmelCase_ , default=lowerCAmelCase_ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) __SCREAMING_SNAKE_CASE = parser.parse_args() __SCREAMING_SNAKE_CASE = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(lowerCAmelCase_ , lowerCAmelCase_ ) if __name__ == "__main__": main()
682
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging a__ : Union[str, Any] = logging.get_logger(__name__) a__ : Optional[int] = { '''google/vivit-b-16x2-kinetics400''': ( '''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json''' ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : Optional[int] = "vivit" def __init__( self : Dict , UpperCAmelCase__ : Dict=2_2_4 , UpperCAmelCase__ : List[Any]=3_2 , UpperCAmelCase__ : str=[2, 1_6, 1_6] , UpperCAmelCase__ : str=3 , UpperCAmelCase__ : str=7_6_8 , UpperCAmelCase__ : Dict=1_2 , UpperCAmelCase__ : Optional[int]=1_2 , UpperCAmelCase__ : Any=3_0_7_2 , UpperCAmelCase__ : Optional[int]="gelu_fast" , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : Optional[int]=0.02 , UpperCAmelCase__ : str=1E-06 , UpperCAmelCase__ : List[Any]=True , **UpperCAmelCase__ : Any , ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = num_frames __SCREAMING_SNAKE_CASE = tubelet_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = qkv_bias super().__init__(**UpperCAmelCase__ )
682
1
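The manual metric bookkeeping in this row's training script exists because a distributed `gather` pads the final evaluation batch with duplicated samples. A tiny sketch of the truncation it performs is below; this is exactly what `accelerator.gather_for_metrics` automates, and the helper name and sizes are illustrative.

import torch

def truncate_final_batch(predictions, references, samples_seen, dataset_size):
    # Drop the padding duplicates so the metric sees each example exactly once.
    keep = dataset_size - samples_seen
    return predictions[:keep], references[:keep]

gathered_preds = torch.zeros(8)  # final gathered batch across all processes
gathered_refs = torch.zeros(8)
preds, refs = truncate_final_batch(gathered_preds, gathered_refs, samples_seen=4, dataset_size=10)
assert preds.shape[0] == refs.shape[0] == 6  # 10 examples total, 4 already counted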