code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
import unittest import numpy as np import torch from diffusers import VersatileDiffusionImageVariationPipeline from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device UpperCAmelCase_ = False class lowerCamelCase__( unittest.TestCase): pass @slow @require_torch_gpu class lowerCamelCase__( unittest.TestCase): def lowerCAmelCase__ ( self: Tuple ): __lowerCamelCase = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) __lowerCamelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" ) __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = pipe( image=_a , generator=_a , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images __lowerCamelCase = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) __lowerCamelCase = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
12
'''simple docstring''' from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase : List[Any] = logging.get_logger(__name__) lowerCamelCase : str = { "huggingface/time-series-transformer-tourism-monthly": ( "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json" ), # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer } class A__ ( A__ ): A__ = 'time_series_transformer' A__ = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', 'num_hidden_layers': 'encoder_layers', } def __init__( self : Optional[int] , _a : Optional[int] = None , _a : Optional[int] = None , _a : str = "student_t" , _a : str = "nll" , _a : int = 1 , _a : List[int] = [1, 2, 3, 4, 5, 6, 7] , _a : Optional[Union[str, bool]] = "mean" , _a : int = 0 , _a : int = 0 , _a : int = 0 , _a : int = 0 , _a : Optional[List[int]] = None , _a : Optional[List[int]] = None , _a : int = 32 , _a : int = 32 , _a : int = 2 , _a : int = 2 , _a : int = 2 , _a : int = 2 , _a : bool = True , _a : str = "gelu" , _a : int = 64 , _a : float = 0.1 , _a : float = 0.1 , _a : float = 0.1 , _a : float = 0.1 , _a : float = 0.1 , _a : int = 100 , _a : float = 0.02 , _a : Union[str, Any]=True , **_a : Optional[Any] , ) -> Optional[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =prediction_length _SCREAMING_SNAKE_CASE =context_length or prediction_length _SCREAMING_SNAKE_CASE =distribution_output _SCREAMING_SNAKE_CASE =loss _SCREAMING_SNAKE_CASE =input_size _SCREAMING_SNAKE_CASE =num_time_features _SCREAMING_SNAKE_CASE =lags_sequence _SCREAMING_SNAKE_CASE =scaling _SCREAMING_SNAKE_CASE =num_dynamic_real_features _SCREAMING_SNAKE_CASE =num_static_real_features _SCREAMING_SNAKE_CASE =num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(_a ) != num_static_categorical_features: raise ValueError( 'The cardinality should be a list of the same length as `num_static_categorical_features`' ) _SCREAMING_SNAKE_CASE =cardinality else: _SCREAMING_SNAKE_CASE =[0] if embedding_dimension and num_static_categorical_features > 0: if len(_a ) != num_static_categorical_features: raise ValueError( 'The embedding dimension should be a list of the same length as `num_static_categorical_features`' ) _SCREAMING_SNAKE_CASE =embedding_dimension else: _SCREAMING_SNAKE_CASE =[min(50 , (cat + 1) // 2 ) for cat in self.cardinality] _SCREAMING_SNAKE_CASE =num_parallel_samples # Transformer architecture configuration _SCREAMING_SNAKE_CASE =input_size * len(_a ) + self._number_of_features _SCREAMING_SNAKE_CASE =d_model _SCREAMING_SNAKE_CASE =encoder_attention_heads _SCREAMING_SNAKE_CASE =decoder_attention_heads _SCREAMING_SNAKE_CASE =encoder_ffn_dim _SCREAMING_SNAKE_CASE =decoder_ffn_dim _SCREAMING_SNAKE_CASE =encoder_layers _SCREAMING_SNAKE_CASE =decoder_layers _SCREAMING_SNAKE_CASE =dropout _SCREAMING_SNAKE_CASE =attention_dropout _SCREAMING_SNAKE_CASE =activation_dropout _SCREAMING_SNAKE_CASE =encoder_layerdrop _SCREAMING_SNAKE_CASE =decoder_layerdrop _SCREAMING_SNAKE_CASE =activation_function _SCREAMING_SNAKE_CASE =init_std _SCREAMING_SNAKE_CASE =use_cache super().__init__(is_encoder_decoder=_a , **_a ) @property def A ( self : List[Any] ) -> int: '''simple docstring''' return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
47
0
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class __lowercase ( unittest.TestCase ): """simple docstring""" def __init__( self : Any , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[int]=7 , lowerCAmelCase__ : Any=3 , lowerCAmelCase__ : Optional[int]=18 , lowerCAmelCase__ : Dict=30 , lowerCAmelCase__ : int=400 , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : str=True , lowerCAmelCase__ : str=False , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : int=[0.5, 0.5, 0.5] , lowerCAmelCase__ : Tuple=[0.5, 0.5, 0.5] , ): SCREAMING_SNAKE_CASE_: Tuple = parent SCREAMING_SNAKE_CASE_: Optional[int] = batch_size SCREAMING_SNAKE_CASE_: List[Any] = num_channels SCREAMING_SNAKE_CASE_: List[str] = image_size SCREAMING_SNAKE_CASE_: Dict = min_resolution SCREAMING_SNAKE_CASE_: Union[str, Any] = max_resolution SCREAMING_SNAKE_CASE_: str = do_resize SCREAMING_SNAKE_CASE_: int = size if size is not None else {"height": 18, "width": 20} SCREAMING_SNAKE_CASE_: str = do_thumbnail SCREAMING_SNAKE_CASE_: Optional[int] = do_align_axis SCREAMING_SNAKE_CASE_: str = do_pad SCREAMING_SNAKE_CASE_: List[str] = do_normalize SCREAMING_SNAKE_CASE_: Optional[Any] = image_mean SCREAMING_SNAKE_CASE_: List[Any] = image_std def _SCREAMING_SNAKE_CASE ( self : Any): return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class __lowercase ( A__ , unittest.TestCase ): """simple docstring""" _UpperCAmelCase : Optional[Any] = DonutImageProcessor if is_vision_available() else None def _SCREAMING_SNAKE_CASE ( self : List[Any]): SCREAMING_SNAKE_CASE_: Optional[int] = DonutImageProcessingTester(self) @property def _SCREAMING_SNAKE_CASE ( self : Tuple): return self.image_processor_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : Tuple): SCREAMING_SNAKE_CASE_: str = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(_a , "do_resize")) self.assertTrue(hasattr(_a , "size")) self.assertTrue(hasattr(_a , "do_thumbnail")) self.assertTrue(hasattr(_a , "do_align_long_axis")) self.assertTrue(hasattr(_a , "do_pad")) self.assertTrue(hasattr(_a , "do_normalize")) self.assertTrue(hasattr(_a , "image_mean")) self.assertTrue(hasattr(_a , "image_std")) def _SCREAMING_SNAKE_CASE ( self : List[str]): SCREAMING_SNAKE_CASE_: Any = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {"height": 18, "width": 20}) SCREAMING_SNAKE_CASE_: Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42) self.assertEqual(image_processor.size , {"height": 42, "width": 42}) # Previous config had dimensions in (width, height) order SCREAMING_SNAKE_CASE_: Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84)) self.assertEqual(image_processor.size , {"height": 84, "width": 42}) def _SCREAMING_SNAKE_CASE ( self : List[Any]): pass @is_flaky() def _SCREAMING_SNAKE_CASE ( self : List[Any]): SCREAMING_SNAKE_CASE_: Any = self.image_processing_class(**self.image_processor_dict) # create random PIL images SCREAMING_SNAKE_CASE_: str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a) for image in image_inputs: self.assertIsInstance(_a , Image.Image) # Test not batched input SCREAMING_SNAKE_CASE_: Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched SCREAMING_SNAKE_CASE_: Union[str, Any] = image_processing(_a , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) @is_flaky() def _SCREAMING_SNAKE_CASE ( self : str): SCREAMING_SNAKE_CASE_: Union[str, Any] = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors SCREAMING_SNAKE_CASE_: Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a) for image in image_inputs: self.assertIsInstance(_a , np.ndarray) # Test not batched input SCREAMING_SNAKE_CASE_: Dict = image_processing(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched SCREAMING_SNAKE_CASE_: Union[str, Any] = image_processing(_a , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) @is_flaky() def _SCREAMING_SNAKE_CASE ( self : List[Any]): SCREAMING_SNAKE_CASE_: Optional[Any] = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors SCREAMING_SNAKE_CASE_: List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a) for image in image_inputs: self.assertIsInstance(_a , torch.Tensor) # Test not batched input SCREAMING_SNAKE_CASE_: Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched SCREAMING_SNAKE_CASE_: List[str] = image_processing(_a , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , )
13
'''simple docstring''' import numpy as np from scipy.spatial.distance import cdist from sklearn.metrics import fa_score import datasets lowerCamelCase : List[Any] = "\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n" lowerCamelCase : Optional[Any] = "\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n" lowerCamelCase : int = "\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for 'cvit-mkb-clsr' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"precision\": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'precision@10': 1.0}\n\n" def _lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Tuple ) -> List[Any]: """simple docstring""" return float((preds == labels).mean() ) def _lowerCAmelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any ) -> List[str]: """simple docstring""" _SCREAMING_SNAKE_CASE =simple_accuracy(_UpperCamelCase , _UpperCamelCase ) _SCREAMING_SNAKE_CASE =float(fa_score(y_true=_UpperCamelCase , y_pred=_UpperCamelCase ) ) return { "accuracy": acc, "f1": fa, } def _lowerCAmelCase ( _UpperCamelCase : Any , _UpperCamelCase : int ) -> str: """simple docstring""" _SCREAMING_SNAKE_CASE =np.array(_UpperCamelCase ) _SCREAMING_SNAKE_CASE =np.array(_UpperCamelCase ) _SCREAMING_SNAKE_CASE =en_sentvecs.shape[0] # mean centering _SCREAMING_SNAKE_CASE =en_sentvecs - np.mean(_UpperCamelCase , axis=0 ) _SCREAMING_SNAKE_CASE =in_sentvecs - np.mean(_UpperCamelCase , axis=0 ) _SCREAMING_SNAKE_CASE =cdist(_UpperCamelCase , _UpperCamelCase , 'cosine' ) _SCREAMING_SNAKE_CASE =np.array(range(_UpperCamelCase ) ) _SCREAMING_SNAKE_CASE =sim.argsort(axis=1 )[:, :10] _SCREAMING_SNAKE_CASE =np.any(preds == actual[:, None] , axis=1 ) return float(matches.mean() ) @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A__ ( datasets.Metric ): def A ( self : Any ) -> List[str]: '''simple docstring''' if self.config_name not in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", "wiki-ner", ]: raise KeyError( 'You should supply a configuration name selected in ' '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", ' '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", ' '"wiki-ner"]' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('int64' ) if self.config_name != 'cvit-mkb-clsr' else datasets.Sequence(datasets.Value('float32' ) ), 'references': datasets.Value('int64' ) if self.config_name != 'cvit-mkb-clsr' else datasets.Sequence(datasets.Value('float32' ) ), } ) , codebase_urls=[] , reference_urls=[] , format='numpy' if self.config_name != 'cvit-mkb-clsr' else None , ) def A ( self : List[str] , _a : Tuple , _a : Optional[int] ) -> int: '''simple docstring''' if self.config_name == "cvit-mkb-clsr": return {"precision@10": precision_at_aa(_a , _a )} elif self.config_name in ["wiki-ner"]: return acc_and_fa(_a , _a ) elif self.config_name in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md", ]: return {"accuracy": simple_accuracy(_a , _a )} else: raise KeyError( 'You should supply a configuration name selected in ' '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", ' '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", ' '"wiki-ner"]' )
47
0
"""simple docstring""" def _snake_case ( _snake_case : Union[str, Any] ): lowerCAmelCase, lowerCAmelCase : Optional[Any] = [], [] while len(_UpperCamelCase ) > 1: lowerCAmelCase, lowerCAmelCase : Any = min(_UpperCamelCase ), max(_UpperCamelCase ) start.append(_UpperCamelCase ) end.append(_UpperCamelCase ) collection.remove(_UpperCamelCase ) collection.remove(_UpperCamelCase ) end.reverse() return start + collection + end if __name__ == "__main__": snake_case__ : List[Any] = input('''Enter numbers separated by a comma:\n''').strip() snake_case__ : Any = [int(item) for item in user_input.split(''',''')] print(*merge_sort(unsorted), sep=''',''')
60
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowerCamelCase : Dict = logging.get_logger(__name__) lowerCamelCase : List[Any] = { "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json", } class A__ ( A__ ): A__ = 'deta' A__ = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : Tuple , _a : Optional[int]=None , _a : int=900 , _a : Optional[Any]=2048 , _a : int=6 , _a : Tuple=2048 , _a : Optional[int]=8 , _a : Any=6 , _a : str=1024 , _a : int=8 , _a : int=0.0 , _a : Optional[Any]=True , _a : Tuple="relu" , _a : Union[str, Any]=256 , _a : Tuple=0.1 , _a : str=0.0 , _a : Dict=0.0 , _a : Tuple=0.02 , _a : Union[str, Any]=1.0 , _a : Any=True , _a : Tuple=False , _a : List[Any]="sine" , _a : str=5 , _a : List[Any]=4 , _a : str=4 , _a : Union[str, Any]=True , _a : Optional[int]=300 , _a : Dict=True , _a : List[Any]=True , _a : List[Any]=1 , _a : List[str]=5 , _a : int=2 , _a : Dict=1 , _a : str=1 , _a : Optional[Any]=5 , _a : Union[str, Any]=2 , _a : List[str]=0.1 , _a : List[Any]=0.25 , **_a : Union[str, Any] , ) -> List[str]: '''simple docstring''' if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' ) _SCREAMING_SNAKE_CASE =CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'] ) else: if isinstance(_a , _a ): _SCREAMING_SNAKE_CASE =backbone_config.pop('model_type' ) _SCREAMING_SNAKE_CASE =CONFIG_MAPPING[backbone_model_type] _SCREAMING_SNAKE_CASE =config_class.from_dict(_a ) _SCREAMING_SNAKE_CASE =backbone_config _SCREAMING_SNAKE_CASE =num_queries _SCREAMING_SNAKE_CASE =max_position_embeddings _SCREAMING_SNAKE_CASE =d_model _SCREAMING_SNAKE_CASE =encoder_ffn_dim _SCREAMING_SNAKE_CASE =encoder_layers _SCREAMING_SNAKE_CASE =encoder_attention_heads _SCREAMING_SNAKE_CASE =decoder_ffn_dim _SCREAMING_SNAKE_CASE =decoder_layers _SCREAMING_SNAKE_CASE =decoder_attention_heads _SCREAMING_SNAKE_CASE =dropout _SCREAMING_SNAKE_CASE =attention_dropout _SCREAMING_SNAKE_CASE =activation_dropout _SCREAMING_SNAKE_CASE =activation_function _SCREAMING_SNAKE_CASE =init_std _SCREAMING_SNAKE_CASE =init_xavier_std _SCREAMING_SNAKE_CASE =encoder_layerdrop _SCREAMING_SNAKE_CASE =auxiliary_loss _SCREAMING_SNAKE_CASE =position_embedding_type # deformable attributes _SCREAMING_SNAKE_CASE =num_feature_levels _SCREAMING_SNAKE_CASE =encoder_n_points _SCREAMING_SNAKE_CASE =decoder_n_points _SCREAMING_SNAKE_CASE =two_stage _SCREAMING_SNAKE_CASE =two_stage_num_proposals _SCREAMING_SNAKE_CASE =with_box_refine _SCREAMING_SNAKE_CASE =assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError('If two_stage is True, with_box_refine must be True.' ) # Hungarian matcher _SCREAMING_SNAKE_CASE =class_cost _SCREAMING_SNAKE_CASE =bbox_cost _SCREAMING_SNAKE_CASE =giou_cost # Loss coefficients _SCREAMING_SNAKE_CASE =mask_loss_coefficient _SCREAMING_SNAKE_CASE =dice_loss_coefficient _SCREAMING_SNAKE_CASE =bbox_loss_coefficient _SCREAMING_SNAKE_CASE =giou_loss_coefficient _SCREAMING_SNAKE_CASE =eos_coefficient _SCREAMING_SNAKE_CASE =focal_alpha super().__init__(is_encoder_decoder=_a , **_a ) @property def A ( self : Dict ) -> int: '''simple docstring''' return self.encoder_attention_heads @property def A ( self : List[Any] ) -> int: '''simple docstring''' return self.d_model def A ( self : Optional[int] ) -> List[str]: '''simple docstring''' _SCREAMING_SNAKE_CASE =copy.deepcopy(self.__dict__ ) _SCREAMING_SNAKE_CASE =self.backbone_config.to_dict() _SCREAMING_SNAKE_CASE =self.__class__.model_type return output
47
0
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VivitImageProcessor class _SCREAMING_SNAKE_CASE( unittest.TestCase ): def __init__( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=7 ,SCREAMING_SNAKE_CASE__=3 ,SCREAMING_SNAKE_CASE__=10 ,SCREAMING_SNAKE_CASE__=18 ,SCREAMING_SNAKE_CASE__=30 ,SCREAMING_SNAKE_CASE__=4_00 ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=[0.5, 0.5, 0.5] ,SCREAMING_SNAKE_CASE__=[0.5, 0.5, 0.5] ,SCREAMING_SNAKE_CASE__=None ,) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE :str = size if size is not None else {'''shortest_edge''': 18} __SCREAMING_SNAKE_CASE :int = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __SCREAMING_SNAKE_CASE :Tuple = parent __SCREAMING_SNAKE_CASE :str = batch_size __SCREAMING_SNAKE_CASE :Dict = num_channels __SCREAMING_SNAKE_CASE :Optional[int] = num_frames __SCREAMING_SNAKE_CASE :List[Any] = image_size __SCREAMING_SNAKE_CASE :Optional[int] = min_resolution __SCREAMING_SNAKE_CASE :Optional[int] = max_resolution __SCREAMING_SNAKE_CASE :Any = do_resize __SCREAMING_SNAKE_CASE :Optional[Any] = size __SCREAMING_SNAKE_CASE :List[str] = do_normalize __SCREAMING_SNAKE_CASE :int = image_mean __SCREAMING_SNAKE_CASE :Any = image_std __SCREAMING_SNAKE_CASE :int = crop_size def _UpperCamelCase ( self ) -> Dict: """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class _SCREAMING_SNAKE_CASE( A__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ : Any = VivitImageProcessor if is_vision_available() else None def _UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE :Any = VivitImageProcessingTester(self ) @property def _UpperCamelCase ( self ) -> List[Any]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _UpperCamelCase ( self ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE :Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_a ,'''image_mean''' ) ) self.assertTrue(hasattr(_a ,'''image_std''' ) ) self.assertTrue(hasattr(_a ,'''do_normalize''' ) ) self.assertTrue(hasattr(_a ,'''do_resize''' ) ) self.assertTrue(hasattr(_a ,'''do_center_crop''' ) ) self.assertTrue(hasattr(_a ,'''size''' ) ) def _UpperCamelCase ( self ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE :Dict = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{'''shortest_edge''': 18} ) self.assertEqual(image_processor.crop_size ,{'''height''': 18, '''width''': 18} ) __SCREAMING_SNAKE_CASE :List[str] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 ) self.assertEqual(image_processor.size ,{'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size ,{'''height''': 84, '''width''': 84} ) def _UpperCamelCase ( self ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE :Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL videos __SCREAMING_SNAKE_CASE :List[str] = prepare_video_inputs(self.image_processor_tester ,equal_resolution=_a ) for video in video_inputs: self.assertIsInstance(_a ,_a ) self.assertIsInstance(video[0] ,Image.Image ) # Test not batched input __SCREAMING_SNAKE_CASE :Optional[int] = image_processing(video_inputs[0] ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape ,( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) # Test batched __SCREAMING_SNAKE_CASE :Any = image_processing(_a ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) def _UpperCamelCase ( self ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE :Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __SCREAMING_SNAKE_CASE :List[Any] = prepare_video_inputs(self.image_processor_tester ,equal_resolution=_a ,numpify=_a ) for video in video_inputs: self.assertIsInstance(_a ,_a ) self.assertIsInstance(video[0] ,np.ndarray ) # Test not batched input __SCREAMING_SNAKE_CASE :int = image_processing(video_inputs[0] ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape ,( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) # Test batched __SCREAMING_SNAKE_CASE :Any = image_processing(_a ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) def _UpperCamelCase ( self ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE :Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __SCREAMING_SNAKE_CASE :str = prepare_video_inputs(self.image_processor_tester ,equal_resolution=_a ,torchify=_a ) for video in video_inputs: self.assertIsInstance(_a ,_a ) self.assertIsInstance(video[0] ,torch.Tensor ) # Test not batched input __SCREAMING_SNAKE_CASE :Optional[Any] = image_processing(video_inputs[0] ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape ,( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) # Test batched __SCREAMING_SNAKE_CASE :Union[str, Any] = image_processing(_a ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,)
191
'''simple docstring''' import numpy as np from PIL import Image def _lowerCAmelCase ( _UpperCamelCase : np.ndarray , _UpperCamelCase : int , _UpperCamelCase : int ) -> np.ndarray: """simple docstring""" _SCREAMING_SNAKE_CASE =np.array(_UpperCamelCase ) if arr.shape[0] != arr.shape[1]: raise ValueError('The input array is not a square matrix' ) _SCREAMING_SNAKE_CASE =0 _SCREAMING_SNAKE_CASE =0 _SCREAMING_SNAKE_CASE =0 _SCREAMING_SNAKE_CASE =0 # compute the shape of the output matrix _SCREAMING_SNAKE_CASE =(arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape maxpool_shape _SCREAMING_SNAKE_CASE =np.zeros((maxpool_shape, maxpool_shape) ) while i < arr.shape[0]: if i + size > arr.shape[0]: # if the end of the matrix is reached, break break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the maximum of the pooling matrix _SCREAMING_SNAKE_CASE =np.max(arr[i : i + size, j : j + size] ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 _SCREAMING_SNAKE_CASE =0 _SCREAMING_SNAKE_CASE =0 return updated_arr def _lowerCAmelCase ( _UpperCamelCase : np.ndarray , _UpperCamelCase : int , _UpperCamelCase : int ) -> np.ndarray: """simple docstring""" _SCREAMING_SNAKE_CASE =np.array(_UpperCamelCase ) if arr.shape[0] != arr.shape[1]: raise ValueError('The input array is not a square matrix' ) _SCREAMING_SNAKE_CASE =0 _SCREAMING_SNAKE_CASE =0 _SCREAMING_SNAKE_CASE =0 _SCREAMING_SNAKE_CASE =0 # compute the shape of the output matrix _SCREAMING_SNAKE_CASE =(arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape avgpool_shape _SCREAMING_SNAKE_CASE =np.zeros((avgpool_shape, avgpool_shape) ) while i < arr.shape[0]: # if the end of the matrix is reached, break if i + size > arr.shape[0]: break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the average of the pooling matrix _SCREAMING_SNAKE_CASE =int(np.average(arr[i : i + size, j : j + size] ) ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 _SCREAMING_SNAKE_CASE =0 _SCREAMING_SNAKE_CASE =0 return updated_arr # Main Function if __name__ == "__main__": from doctest import testmod testmod(name="avgpooling", verbose=True) # Loading the image lowerCamelCase : Optional[Any] = Image.open("path_to_image") # Converting the image to numpy array and maxpooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show() # Converting the image to numpy array and averagepooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
47
0
import os import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers.models.realm.configuration_realm import RealmConfig from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer class _A ( A__): def UpperCAmelCase ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = tempfile.mkdtemp() SCREAMING_SNAKE_CASE_ : Dict = 5 # Realm tok SCREAMING_SNAKE_CASE_ : str = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'test', 'question', 'this', 'is', 'the', 'first', 'second', 'third', 'fourth', 'fifth', 'record', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] SCREAMING_SNAKE_CASE_ : Optional[int] = os.path.join(self.tmpdirname , 'realm_tokenizer' ) os.makedirs(_a , exist_ok=_a ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(_a , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) SCREAMING_SNAKE_CASE_ : List[str] = os.path.join(self.tmpdirname , 'realm_block_records' ) os.makedirs(_a , exist_ok=_a ) def UpperCAmelCase ( self ): """simple docstring""" return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'realm_tokenizer' ) ) def UpperCAmelCase ( self ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def UpperCAmelCase ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = RealmConfig(num_block_records=self.num_block_records ) return config def UpperCAmelCase ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = Dataset.from_dict( { 'id': ['0', '1'], 'question': ['foo', 'bar'], 'answers': [['Foo', 'Bar'], ['Bar']], } ) return dataset def UpperCAmelCase ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = np.array( [ b'This is the first record', b'This is the second record', b'This is the third record', b'This is the fourth record', b'This is the fifth record', b'This is a longer longer longer record', ] , dtype=_a , ) return block_records def UpperCAmelCase ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str = RealmRetriever( block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , ) return retriever def UpperCAmelCase ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = self.get_config() SCREAMING_SNAKE_CASE_ : List[Any] = self.get_dummy_retriever() SCREAMING_SNAKE_CASE_ : List[Any] = retriever.tokenizer SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array([0, 3] , dtype='long' ) SCREAMING_SNAKE_CASE_ : Dict = tokenizer(['Test question'] ).input_ids SCREAMING_SNAKE_CASE_ : Dict = tokenizer( ['the fourth'] , add_special_tokens=_a , return_token_type_ids=_a , return_attention_mask=_a , ).input_ids SCREAMING_SNAKE_CASE_ : int = config.reader_seq_len SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = retriever( _a , _a , answer_ids=_a , max_length=_a , return_tensors='np' ) self.assertEqual(len(_a ) , 2 ) self.assertEqual(len(_a ) , 2 ) self.assertEqual(len(_a ) , 2 ) self.assertEqual(concat_inputs.input_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) ) self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'] , ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'] , ) def UpperCAmelCase ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = self.get_config() SCREAMING_SNAKE_CASE_ : Dict = self.get_dummy_retriever() SCREAMING_SNAKE_CASE_ : Dict = retriever.tokenizer SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.array([0, 3, 5] , dtype='long' ) SCREAMING_SNAKE_CASE_ : int = tokenizer(['Test question'] ).input_ids SCREAMING_SNAKE_CASE_ : Tuple = tokenizer( ['the fourth', 'longer longer'] , add_special_tokens=_a , return_token_type_ids=_a , return_attention_mask=_a , ).input_ids SCREAMING_SNAKE_CASE_ : List[Any] = config.reader_seq_len SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = retriever( _a , _a , answer_ids=_a , max_length=_a , return_tensors='np' ) self.assertEqual([False, True, True] , _a ) self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , _a ) self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , _a ) def UpperCAmelCase ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = self.get_dummy_retriever() retriever.save_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) ) # Test local path SCREAMING_SNAKE_CASE_ : Optional[int] = retriever.from_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) ) self.assertEqual(retriever.block_records[0] , b'This is the first record' ) # Test mocked remote path with patch('transformers.models.realm.retrieval_realm.hf_hub_download' ) as mock_hf_hub_download: SCREAMING_SNAKE_CASE_ : Dict = os.path.join( os.path.join(self.tmpdirname , 'realm_block_records' ) , _REALM_BLOCK_RECORDS_FILENAME ) SCREAMING_SNAKE_CASE_ : List[str] = RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa' ) self.assertEqual(retriever.block_records[0] , b'This is the first record' )
253
'''simple docstring''' import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def _lowerCAmelCase ( ) -> Any: """simple docstring""" _SCREAMING_SNAKE_CASE =ArgumentParser( description=( 'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes' ) ) # Optional arguments for the launch helper parser.add_argument('--num_cores' , type=_UpperCamelCase , default=1 , help='Number of TPU cores to use (1 or 8).' ) # positional parser.add_argument( 'training_script' , type=_UpperCamelCase , help=( 'The full path to the single TPU training ' 'program/script to be launched in parallel, ' 'followed by all the arguments for the ' 'training script' ) , ) # rest from the training program parser.add_argument('training_script_args' , nargs=_UpperCamelCase ) return parser.parse_args() def _lowerCAmelCase ( ) -> Optional[int]: """simple docstring""" _SCREAMING_SNAKE_CASE =parse_args() # Import training_script as a module. _SCREAMING_SNAKE_CASE =Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) _SCREAMING_SNAKE_CASE =script_fpath.stem _SCREAMING_SNAKE_CASE =importlib.import_module(_UpperCamelCase ) # Patch sys.argv _SCREAMING_SNAKE_CASE =[args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
47
0
import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def lowerCAmelCase ( _lowerCAmelCase : Optional[int] ): """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ = image.size UpperCAmelCase__ , UpperCAmelCase__ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 UpperCAmelCase__ = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) UpperCAmelCase__ = np.array(_UpperCamelCase ).astype(np.floataa ) / 255.0 UpperCAmelCase__ = image[None].transpose(0 , 3 , 1 , 2 ) UpperCAmelCase__ = torch.from_numpy(_UpperCamelCase ) return 2.0 * image - 1.0 class _UpperCamelCase ( A__ ): def __init__( self :Tuple , lowerCamelCase :VQModel , lowerCamelCase :UNetaDModel , lowerCamelCase :Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ] , ) -> int: super().__init__() self.register_modules(vqvae=_a , unet=_a , scheduler=_a ) @torch.no_grad() def __call__( self :Any , lowerCamelCase :Union[torch.Tensor, PIL.Image.Image] = None , lowerCamelCase :Optional[int] = 1 , lowerCamelCase :Optional[int] = 100 , lowerCamelCase :Optional[float] = 0.0 , lowerCamelCase :Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase :Optional[str] = "pil" , lowerCamelCase :bool = True , ) -> Union[Tuple, ImagePipelineOutput]: if isinstance(_a , PIL.Image.Image ): UpperCAmelCase__ = 1 elif isinstance(_a , torch.Tensor ): UpperCAmelCase__ = image.shape[0] else: raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_a )}''' ) if isinstance(_a , PIL.Image.Image ): UpperCAmelCase__ = preprocess(_a ) UpperCAmelCase__ , UpperCAmelCase__ = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image UpperCAmelCase__ = (batch_size, self.unet.config.in_channels // 2, height, width) UpperCAmelCase__ = next(self.unet.parameters() ).dtype UpperCAmelCase__ = randn_tensor(_a , generator=_a , device=self.device , dtype=_a ) UpperCAmelCase__ = image.to(device=self.device , dtype=_a ) # set timesteps and move to the correct device self.scheduler.set_timesteps(_a , device=self.device ) UpperCAmelCase__ = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler UpperCAmelCase__ = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] UpperCAmelCase__ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) UpperCAmelCase__ = {} if accepts_eta: UpperCAmelCase__ = eta for t in self.progress_bar(_a ): # concat latents and low resolution image in the channel dimension. UpperCAmelCase__ = torch.cat([latents, image] , dim=1 ) UpperCAmelCase__ = self.scheduler.scale_model_input(_a , _a ) # predict the noise residual UpperCAmelCase__ = self.unet(_a , _a ).sample # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase__ = self.scheduler.step(_a , _a , _a , **_a ).prev_sample # decode the image latents with the VQVAE UpperCAmelCase__ = self.vqvae.decode(_a ).sample UpperCAmelCase__ = torch.clamp(_a , -1.0 , 1.0 ) UpperCAmelCase__ = image / 2 + 0.5 UpperCAmelCase__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCAmelCase__ = self.numpy_to_pil(_a ) if not return_dict: return (image,) return ImagePipelineOutput(images=_a )
169
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class A__ ( A__ ): A__ = ( 'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.' 'It takes two arguments named `image` which should be the original image, and `label` which should be a text ' 'describing the elements what should be identified in the segmentation mask. The tool returns the mask.' ) A__ = 'CIDAS/clipseg-rd64-refined' A__ = 'image_segmenter' A__ = CLIPSegForImageSegmentation A__ = ['image', 'text'] A__ = ['image'] def __init__( self : Any , *_a : Dict , **_a : str ) -> Any: '''simple docstring''' requires_backends(self , ['vision'] ) super().__init__(*_a , **_a ) def A ( self : int , _a : "Image" , _a : str ) -> Optional[Any]: '''simple docstring''' return self.pre_processor(text=[label] , images=[image] , padding=_a , return_tensors='pt' ) def A ( self : Dict , _a : Dict ) -> str: '''simple docstring''' with torch.no_grad(): _SCREAMING_SNAKE_CASE =self.model(**_a ).logits return logits def A ( self : Any , _a : str ) -> Union[str, Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =outputs.cpu().detach().numpy() _SCREAMING_SNAKE_CASE =0 _SCREAMING_SNAKE_CASE =1 return Image.fromarray((array * 255).astype(np.uinta ) )
47
0
import json import os import tempfile from transformers.testing_utils import check_json_file_has_correct_format class lowerCamelCase__ : SCREAMING_SNAKE_CASE__ = None def __A (self ) -> Tuple: _lowercase =self.feature_extraction_class(**self.feat_extract_dict ) _lowercase =json.loads(feat_extract.to_json_string() ) for key, value in self.feat_extract_dict.items(): self.assertEqual(obj[key] , _a ) def __A (self ) -> Any: _lowercase =self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _lowercase =os.path.join(_a , '''feat_extract.json''' ) feat_extract_first.to_json_file(_a ) _lowercase =self.feature_extraction_class.from_json_file(_a ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def __A (self ) -> Tuple: _lowercase =self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _lowercase =feat_extract_first.save_pretrained(_a )[0] check_json_file_has_correct_format(_a ) _lowercase =self.feature_extraction_class.from_pretrained(_a ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def __A (self ) -> Tuple: _lowercase =self.feature_extraction_class() self.assertIsNotNone(_a )
5
'''simple docstring''' from __future__ import annotations import math def _lowerCAmelCase ( _UpperCamelCase : int ) -> bool: """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(_UpperCamelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def _lowerCAmelCase ( _UpperCamelCase : int ) -> list[int]: """simple docstring""" _SCREAMING_SNAKE_CASE =str(_UpperCamelCase ) _SCREAMING_SNAKE_CASE =[n] for i in range(1 , len(_UpperCamelCase ) ): list_nums.append(int(str_num[i:] ) ) list_nums.append(int(str_num[:-i] ) ) return list_nums def _lowerCAmelCase ( _UpperCamelCase : int ) -> bool: """simple docstring""" if len(str(_UpperCamelCase ) ) > 3: if not is_prime(int(str(_UpperCamelCase )[-3:] ) ) or not is_prime(int(str(_UpperCamelCase )[:3] ) ): return False return True def _lowerCAmelCase ( _UpperCamelCase : int = 11 ) -> list[int]: """simple docstring""" _SCREAMING_SNAKE_CASE =[] _SCREAMING_SNAKE_CASE =13 while len(_UpperCamelCase ) != count: if validate(_UpperCamelCase ): _SCREAMING_SNAKE_CASE =list_truncated_nums(_UpperCamelCase ) if all(is_prime(_UpperCamelCase ) for i in list_nums ): list_truncated_primes.append(_UpperCamelCase ) num += 2 return list_truncated_primes def _lowerCAmelCase ( ) -> int: """simple docstring""" return sum(compute_truncated_primes(11 ) ) if __name__ == "__main__": print(f'''{sum(compute_truncated_primes(1_1)) = }''')
47
0
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING a_ = logging.get_logger(__name__) a_ = { "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json", } class __SCREAMING_SNAKE_CASE ( A__ ): snake_case_ = """deta""" snake_case_ = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self : Tuple , __lowercase : Optional[int]=None , __lowercase : int=9_00 , __lowercase : Optional[Any]=20_48 , __lowercase : int=6 , __lowercase : Tuple=20_48 , __lowercase : Optional[int]=8 , __lowercase : Any=6 , __lowercase : str=10_24 , __lowercase : int=8 , __lowercase : int=0.0 , __lowercase : Optional[Any]=True , __lowercase : Tuple="relu" , __lowercase : Union[str, Any]=2_56 , __lowercase : Tuple=0.1 , __lowercase : str=0.0 , __lowercase : Dict=0.0 , __lowercase : Tuple=0.02 , __lowercase : Union[str, Any]=1.0 , __lowercase : Any=True , __lowercase : Tuple=False , __lowercase : List[Any]="sine" , __lowercase : str=5 , __lowercase : List[Any]=4 , __lowercase : str=4 , __lowercase : Union[str, Any]=True , __lowercase : Optional[int]=3_00 , __lowercase : Dict=True , __lowercase : List[Any]=True , __lowercase : List[Any]=1 , __lowercase : List[str]=5 , __lowercase : int=2 , __lowercase : Dict=1 , __lowercase : str=1 , __lowercase : Optional[Any]=5 , __lowercase : Union[str, Any]=2 , __lowercase : List[str]=0.1 , __lowercase : List[Any]=0.25 , **__lowercase : Union[str, Any] , ) -> List[str]: if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) SCREAMING_SNAKE_CASE__ : Dict =CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] ) else: if isinstance(_a , _a ): SCREAMING_SNAKE_CASE__ : Dict =backbone_config.pop('''model_type''' ) SCREAMING_SNAKE_CASE__ : str =CONFIG_MAPPING[backbone_model_type] SCREAMING_SNAKE_CASE__ : List[str] =config_class.from_dict(_a ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =backbone_config SCREAMING_SNAKE_CASE__ : Tuple =num_queries SCREAMING_SNAKE_CASE__ : Optional[int] =max_position_embeddings SCREAMING_SNAKE_CASE__ : int =d_model SCREAMING_SNAKE_CASE__ : str =encoder_ffn_dim SCREAMING_SNAKE_CASE__ : int =encoder_layers SCREAMING_SNAKE_CASE__ : List[Any] =encoder_attention_heads SCREAMING_SNAKE_CASE__ : Dict =decoder_ffn_dim SCREAMING_SNAKE_CASE__ : List[Any] =decoder_layers SCREAMING_SNAKE_CASE__ : int =decoder_attention_heads SCREAMING_SNAKE_CASE__ : List[Any] =dropout SCREAMING_SNAKE_CASE__ : Any =attention_dropout SCREAMING_SNAKE_CASE__ : Tuple =activation_dropout SCREAMING_SNAKE_CASE__ : Dict =activation_function SCREAMING_SNAKE_CASE__ : Union[str, Any] =init_std SCREAMING_SNAKE_CASE__ : Tuple =init_xavier_std SCREAMING_SNAKE_CASE__ : str =encoder_layerdrop SCREAMING_SNAKE_CASE__ : str =auxiliary_loss SCREAMING_SNAKE_CASE__ : Dict =position_embedding_type # deformable attributes SCREAMING_SNAKE_CASE__ : Optional[Any] =num_feature_levels SCREAMING_SNAKE_CASE__ : Union[str, Any] =encoder_n_points SCREAMING_SNAKE_CASE__ : str =decoder_n_points SCREAMING_SNAKE_CASE__ : Optional[int] =two_stage SCREAMING_SNAKE_CASE__ : int =two_stage_num_proposals SCREAMING_SNAKE_CASE__ : Union[str, Any] =with_box_refine SCREAMING_SNAKE_CASE__ : List[str] =assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError('''If two_stage is True, with_box_refine must be True.''' ) # Hungarian matcher SCREAMING_SNAKE_CASE__ : Tuple =class_cost SCREAMING_SNAKE_CASE__ : Tuple =bbox_cost SCREAMING_SNAKE_CASE__ : Union[str, Any] =giou_cost # Loss coefficients SCREAMING_SNAKE_CASE__ : Dict =mask_loss_coefficient SCREAMING_SNAKE_CASE__ : str =dice_loss_coefficient SCREAMING_SNAKE_CASE__ : Optional[Any] =bbox_loss_coefficient SCREAMING_SNAKE_CASE__ : List[Any] =giou_loss_coefficient SCREAMING_SNAKE_CASE__ : Any =eos_coefficient SCREAMING_SNAKE_CASE__ : Optional[int] =focal_alpha super().__init__(is_encoder_decoder=_a , **_a ) @property def __magic_name__ ( self : Dict ) -> int: return self.encoder_attention_heads @property def __magic_name__ ( self : List[Any] ) -> int: return self.d_model def __magic_name__ ( self : Optional[int] ) -> List[str]: SCREAMING_SNAKE_CASE__ : List[str] =copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE__ : List[str] =self.backbone_config.to_dict() SCREAMING_SNAKE_CASE__ : Tuple =self.__class__.model_type return output
152
'''simple docstring''' import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: lowerCamelCase : int = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class A__ ( unittest.TestCase ): def __init__( self : List[str] , _a : List[Any] , _a : List[str]=7 , _a : List[str]=3 , _a : Tuple=18 , _a : Tuple=30 , _a : str=400 , _a : Tuple=None , _a : Union[str, Any]=True , _a : List[str]=True , _a : Optional[int]=None , ) -> List[str]: '''simple docstring''' _SCREAMING_SNAKE_CASE =size if size is not None else {'height': 20, 'width': 20} _SCREAMING_SNAKE_CASE =parent _SCREAMING_SNAKE_CASE =batch_size _SCREAMING_SNAKE_CASE =num_channels _SCREAMING_SNAKE_CASE =image_size _SCREAMING_SNAKE_CASE =min_resolution _SCREAMING_SNAKE_CASE =max_resolution _SCREAMING_SNAKE_CASE =size _SCREAMING_SNAKE_CASE =do_normalize _SCREAMING_SNAKE_CASE =do_convert_rgb _SCREAMING_SNAKE_CASE =[512, 1024, 2048, 4096] _SCREAMING_SNAKE_CASE =patch_size if patch_size is not None else {'height': 16, 'width': 16} def A ( self : Any ) -> List[str]: '''simple docstring''' return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def A ( self : int ) -> str: '''simple docstring''' _SCREAMING_SNAKE_CASE ='https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg' _SCREAMING_SNAKE_CASE =Image.open(requests.get(_a , stream=_a ).raw ).convert('RGB' ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , ) @require_torch @require_vision class A__ ( A__ , unittest.TestCase ): A__ = PixaStructImageProcessor if is_vision_available() else None def A ( self : Dict ) -> Tuple: '''simple docstring''' _SCREAMING_SNAKE_CASE =PixaStructImageProcessingTester(self ) @property def A ( self : Optional[Any] ) -> int: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def A ( self : Any ) -> Tuple: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_a , 'do_normalize' ) ) self.assertTrue(hasattr(_a , 'do_convert_rgb' ) ) def A ( self : Any ) -> List[str]: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_dummy_image() _SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict ) _SCREAMING_SNAKE_CASE =2048 _SCREAMING_SNAKE_CASE =image_processor(_a , return_tensors='pt' , max_patches=_a ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.06_06 ) , atol=1e-3 , rtol=1e-3 ) ) def A ( self : Any ) -> Any: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict ) # create random PIL images _SCREAMING_SNAKE_CASE =prepare_image_inputs(self.image_processor_tester , equal_resolution=_a ) for image in image_inputs: self.assertIsInstance(_a , Image.Image ) # Test not batched input _SCREAMING_SNAKE_CASE =( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _SCREAMING_SNAKE_CASE =image_processor( image_inputs[0] , return_tensors='pt' , max_patches=_a ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _SCREAMING_SNAKE_CASE =image_processor( _a , return_tensors='pt' , max_patches=_a ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def A ( self : List[str] ) -> Optional[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict ) # create random PIL images _SCREAMING_SNAKE_CASE =prepare_image_inputs(self.image_processor_tester , equal_resolution=_a ) for image in image_inputs: self.assertIsInstance(_a , Image.Image ) # Test not batched input _SCREAMING_SNAKE_CASE =( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 _SCREAMING_SNAKE_CASE =True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(_a ): _SCREAMING_SNAKE_CASE =image_processor( image_inputs[0] , return_tensors='pt' , max_patches=_a ).flattened_patches _SCREAMING_SNAKE_CASE ='Hello' _SCREAMING_SNAKE_CASE =image_processor( image_inputs[0] , return_tensors='pt' , max_patches=_a , header_text=_a ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _SCREAMING_SNAKE_CASE =image_processor( _a , return_tensors='pt' , max_patches=_a , header_text=_a ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def A ( self : List[Any] ) -> Any: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _SCREAMING_SNAKE_CASE =prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a ) for image in image_inputs: self.assertIsInstance(_a , np.ndarray ) _SCREAMING_SNAKE_CASE =( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _SCREAMING_SNAKE_CASE =image_processor( image_inputs[0] , return_tensors='pt' , max_patches=_a ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _SCREAMING_SNAKE_CASE =image_processor( _a , return_tensors='pt' , max_patches=_a ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def A ( self : Union[str, Any] ) -> Tuple: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _SCREAMING_SNAKE_CASE =prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a ) for image in image_inputs: self.assertIsInstance(_a , torch.Tensor ) # Test not batched input _SCREAMING_SNAKE_CASE =( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _SCREAMING_SNAKE_CASE =image_processor( image_inputs[0] , return_tensors='pt' , max_patches=_a ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _SCREAMING_SNAKE_CASE =image_processor( _a , return_tensors='pt' , max_patches=_a ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , ) @require_torch @require_vision class A__ ( A__ , unittest.TestCase ): A__ = PixaStructImageProcessor if is_vision_available() else None def A ( self : str ) -> Dict: '''simple docstring''' _SCREAMING_SNAKE_CASE =PixaStructImageProcessingTester(self , num_channels=4 ) _SCREAMING_SNAKE_CASE =3 @property def A ( self : List[str] ) -> Optional[Any]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def A ( self : List[str] ) -> Tuple: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_a , 'do_normalize' ) ) self.assertTrue(hasattr(_a , 'do_convert_rgb' ) ) def A ( self : Dict ) -> int: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict ) # create random PIL images _SCREAMING_SNAKE_CASE =prepare_image_inputs(self.image_processor_tester , equal_resolution=_a ) for image in image_inputs: self.assertIsInstance(_a , Image.Image ) # Test not batched input _SCREAMING_SNAKE_CASE =( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _SCREAMING_SNAKE_CASE =image_processor( image_inputs[0] , return_tensors='pt' , max_patches=_a ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _SCREAMING_SNAKE_CASE =image_processor( _a , return_tensors='pt' , max_patches=_a ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
47
0
"""simple docstring""" from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES _UpperCamelCase : Any = logging.get_logger(__name__) _UpperCamelCase : Dict = OrderedDict( [ # Base model mapping ("albert", "FlaxAlbertModel"), ("bart", "FlaxBartModel"), ("beit", "FlaxBeitModel"), ("bert", "FlaxBertModel"), ("big_bird", "FlaxBigBirdModel"), ("blenderbot", "FlaxBlenderbotModel"), ("blenderbot-small", "FlaxBlenderbotSmallModel"), ("clip", "FlaxCLIPModel"), ("distilbert", "FlaxDistilBertModel"), ("electra", "FlaxElectraModel"), ("gpt-sw3", "FlaxGPT2Model"), ("gpt2", "FlaxGPT2Model"), ("gpt_neo", "FlaxGPTNeoModel"), ("gptj", "FlaxGPTJModel"), ("longt5", "FlaxLongT5Model"), ("marian", "FlaxMarianModel"), ("mbart", "FlaxMBartModel"), ("mt5", "FlaxMT5Model"), ("opt", "FlaxOPTModel"), ("pegasus", "FlaxPegasusModel"), ("regnet", "FlaxRegNetModel"), ("resnet", "FlaxResNetModel"), ("roberta", "FlaxRobertaModel"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"), ("roformer", "FlaxRoFormerModel"), ("t5", "FlaxT5Model"), ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"), ("vit", "FlaxViTModel"), ("wav2vec2", "FlaxWav2Vec2Model"), ("whisper", "FlaxWhisperModel"), ("xglm", "FlaxXGLMModel"), ("xlm-roberta", "FlaxXLMRobertaModel"), ] ) _UpperCamelCase : Union[str, Any] = OrderedDict( [ # Model for pre-training mapping ("albert", "FlaxAlbertForPreTraining"), ("bart", "FlaxBartForConditionalGeneration"), ("bert", "FlaxBertForPreTraining"), ("big_bird", "FlaxBigBirdForPreTraining"), ("electra", "FlaxElectraForPreTraining"), ("longt5", "FlaxLongT5ForConditionalGeneration"), ("mbart", "FlaxMBartForConditionalGeneration"), ("mt5", "FlaxMT5ForConditionalGeneration"), ("roberta", "FlaxRobertaForMaskedLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"), ("roformer", "FlaxRoFormerForMaskedLM"), ("t5", "FlaxT5ForConditionalGeneration"), ("wav2vec2", "FlaxWav2Vec2ForPreTraining"), ("whisper", "FlaxWhisperForConditionalGeneration"), ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"), ] ) _UpperCamelCase : Dict = OrderedDict( [ # Model for Masked LM mapping ("albert", "FlaxAlbertForMaskedLM"), ("bart", "FlaxBartForConditionalGeneration"), ("bert", "FlaxBertForMaskedLM"), ("big_bird", "FlaxBigBirdForMaskedLM"), ("distilbert", "FlaxDistilBertForMaskedLM"), ("electra", "FlaxElectraForMaskedLM"), ("mbart", "FlaxMBartForConditionalGeneration"), ("roberta", "FlaxRobertaForMaskedLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"), ("roformer", "FlaxRoFormerForMaskedLM"), ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"), ] ) _UpperCamelCase : Any = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("bart", "FlaxBartForConditionalGeneration"), ("blenderbot", "FlaxBlenderbotForConditionalGeneration"), ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"), ("encoder-decoder", "FlaxEncoderDecoderModel"), ("longt5", "FlaxLongT5ForConditionalGeneration"), ("marian", "FlaxMarianMTModel"), ("mbart", "FlaxMBartForConditionalGeneration"), ("mt5", "FlaxMT5ForConditionalGeneration"), ("pegasus", "FlaxPegasusForConditionalGeneration"), ("t5", "FlaxT5ForConditionalGeneration"), ] ) _UpperCamelCase : Union[str, Any] = OrderedDict( [ # Model for Image-classsification ("beit", "FlaxBeitForImageClassification"), ("regnet", "FlaxRegNetForImageClassification"), ("resnet", "FlaxResNetForImageClassification"), ("vit", "FlaxViTForImageClassification"), ] ) _UpperCamelCase : Any = OrderedDict( [ ("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"), ] ) _UpperCamelCase : List[Any] = OrderedDict( [ # Model for Causal LM mapping ("bart", "FlaxBartForCausalLM"), ("bert", "FlaxBertForCausalLM"), ("big_bird", "FlaxBigBirdForCausalLM"), ("electra", "FlaxElectraForCausalLM"), ("gpt-sw3", "FlaxGPT2LMHeadModel"), ("gpt2", "FlaxGPT2LMHeadModel"), ("gpt_neo", "FlaxGPTNeoForCausalLM"), ("gptj", "FlaxGPTJForCausalLM"), ("opt", "FlaxOPTForCausalLM"), ("roberta", "FlaxRobertaForCausalLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"), ("xglm", "FlaxXGLMForCausalLM"), ("xlm-roberta", "FlaxXLMRobertaForCausalLM"), ] ) _UpperCamelCase : Optional[int] = OrderedDict( [ # Model for Sequence Classification mapping ("albert", "FlaxAlbertForSequenceClassification"), ("bart", "FlaxBartForSequenceClassification"), ("bert", "FlaxBertForSequenceClassification"), ("big_bird", "FlaxBigBirdForSequenceClassification"), ("distilbert", "FlaxDistilBertForSequenceClassification"), ("electra", "FlaxElectraForSequenceClassification"), ("mbart", "FlaxMBartForSequenceClassification"), ("roberta", "FlaxRobertaForSequenceClassification"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"), ("roformer", "FlaxRoFormerForSequenceClassification"), ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"), ] ) _UpperCamelCase : Optional[Any] = OrderedDict( [ # Model for Question Answering mapping ("albert", "FlaxAlbertForQuestionAnswering"), ("bart", "FlaxBartForQuestionAnswering"), ("bert", "FlaxBertForQuestionAnswering"), ("big_bird", "FlaxBigBirdForQuestionAnswering"), ("distilbert", "FlaxDistilBertForQuestionAnswering"), ("electra", "FlaxElectraForQuestionAnswering"), ("mbart", "FlaxMBartForQuestionAnswering"), ("roberta", "FlaxRobertaForQuestionAnswering"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"), ("roformer", "FlaxRoFormerForQuestionAnswering"), ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"), ] ) _UpperCamelCase : List[str] = OrderedDict( [ # Model for Token Classification mapping ("albert", "FlaxAlbertForTokenClassification"), ("bert", "FlaxBertForTokenClassification"), ("big_bird", "FlaxBigBirdForTokenClassification"), ("distilbert", "FlaxDistilBertForTokenClassification"), ("electra", "FlaxElectraForTokenClassification"), ("roberta", "FlaxRobertaForTokenClassification"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"), ("roformer", "FlaxRoFormerForTokenClassification"), ("xlm-roberta", "FlaxXLMRobertaForTokenClassification"), ] ) _UpperCamelCase : Optional[int] = OrderedDict( [ # Model for Multiple Choice mapping ("albert", "FlaxAlbertForMultipleChoice"), ("bert", "FlaxBertForMultipleChoice"), ("big_bird", "FlaxBigBirdForMultipleChoice"), ("distilbert", "FlaxDistilBertForMultipleChoice"), ("electra", "FlaxElectraForMultipleChoice"), ("roberta", "FlaxRobertaForMultipleChoice"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"), ("roformer", "FlaxRoFormerForMultipleChoice"), ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"), ] ) _UpperCamelCase : str = OrderedDict( [ ("bert", "FlaxBertForNextSentencePrediction"), ] ) _UpperCamelCase : Dict = OrderedDict( [ ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"), ("whisper", "FlaxWhisperForConditionalGeneration"), ] ) _UpperCamelCase : Optional[int] = OrderedDict( [ ("whisper", "FlaxWhisperForAudioClassification"), ] ) _UpperCamelCase : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) _UpperCamelCase : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) _UpperCamelCase : Union[str, Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) _UpperCamelCase : List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) _UpperCamelCase : Any = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) _UpperCamelCase : Union[str, Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) _UpperCamelCase : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) _UpperCamelCase : Dict = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) _UpperCamelCase : Optional[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) _UpperCamelCase : Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) _UpperCamelCase : Optional[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) _UpperCamelCase : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) _UpperCamelCase : Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) _UpperCamelCase : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class UpperCAmelCase_ ( _BaseAutoModelClass): lowerCamelCase__ : List[str] = FLAX_MODEL_MAPPING _UpperCamelCase : int = auto_class_update(FlaxAutoModel) class UpperCAmelCase_ ( _BaseAutoModelClass): lowerCamelCase__ : Tuple = FLAX_MODEL_FOR_PRETRAINING_MAPPING _UpperCamelCase : Any = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining") class UpperCAmelCase_ ( _BaseAutoModelClass): lowerCamelCase__ : int = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING _UpperCamelCase : Union[str, Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling") class UpperCAmelCase_ ( _BaseAutoModelClass): lowerCamelCase__ : Optional[int] = FLAX_MODEL_FOR_MASKED_LM_MAPPING _UpperCamelCase : List[Any] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling") class UpperCAmelCase_ ( _BaseAutoModelClass): lowerCamelCase__ : Dict = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING _UpperCamelCase : Any = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base" ) class UpperCAmelCase_ ( _BaseAutoModelClass): lowerCamelCase__ : Dict = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING _UpperCamelCase : Tuple = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="sequence classification" ) class UpperCAmelCase_ ( _BaseAutoModelClass): lowerCamelCase__ : List[Any] = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING _UpperCamelCase : Optional[int] = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering") class UpperCAmelCase_ ( _BaseAutoModelClass): lowerCamelCase__ : Any = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING _UpperCamelCase : str = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="token classification" ) class UpperCAmelCase_ ( _BaseAutoModelClass): lowerCamelCase__ : List[Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING _UpperCamelCase : Tuple = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice") class UpperCAmelCase_ ( _BaseAutoModelClass): lowerCamelCase__ : str = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING _UpperCamelCase : List[str] = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction" ) class UpperCAmelCase_ ( _BaseAutoModelClass): lowerCamelCase__ : Any = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING _UpperCamelCase : Optional[Any] = auto_class_update( FlaxAutoModelForImageClassification, head_doc="image classification" ) class UpperCAmelCase_ ( _BaseAutoModelClass): lowerCamelCase__ : Any = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING _UpperCamelCase : List[str] = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling") class UpperCAmelCase_ ( _BaseAutoModelClass): lowerCamelCase__ : Optional[Any] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING _UpperCamelCase : Optional[Any] = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling" )
77
'''simple docstring''' import copy import re class A__ : A__ = 'hp' A__ = {} A__ = None @classmethod def A ( cls : Optional[Any] , _a : Optional[Any] , _a : Any ) -> Union[str, Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =prefix _SCREAMING_SNAKE_CASE =defaults cls.build_naming_info() @staticmethod def A ( _a : Optional[Any] , _a : List[Any] ) -> Any: '''simple docstring''' if len(_a ) == 0: return "" _SCREAMING_SNAKE_CASE =None if any(char.isdigit() for char in word ): raise Exception(f"Parameters should not contain numbers: '{word}' contains a number" ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 , len(_a ) + 1 ): _SCREAMING_SNAKE_CASE =word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: _SCREAMING_SNAKE_CASE =prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(_a : str ): _SCREAMING_SNAKE_CASE ='' while integer != 0: _SCREAMING_SNAKE_CASE =chr(ord('A' ) + integer % 10 ) + s integer //= 10 return s _SCREAMING_SNAKE_CASE =0 while True: _SCREAMING_SNAKE_CASE =word + '#' + int_to_alphabetic(_a ) if sword in info["reverse_short_word"]: continue else: _SCREAMING_SNAKE_CASE =sword break _SCREAMING_SNAKE_CASE =short_word _SCREAMING_SNAKE_CASE =word return short_word @staticmethod def A ( _a : Optional[Any] , _a : int ) -> Optional[int]: '''simple docstring''' _SCREAMING_SNAKE_CASE =param_name.split('_' ) _SCREAMING_SNAKE_CASE =[TrialShortNamer.shortname_for_word(_a , _a ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name _SCREAMING_SNAKE_CASE =['', '_'] for separator in separators: _SCREAMING_SNAKE_CASE =separator.join(_a ) if shortname not in info["reverse_short_param"]: _SCREAMING_SNAKE_CASE =shortname _SCREAMING_SNAKE_CASE =param_name return shortname return param_name @staticmethod def A ( _a : Dict , _a : int ) -> Optional[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =TrialShortNamer.shortname_for_key(_a , _a ) _SCREAMING_SNAKE_CASE =short_name _SCREAMING_SNAKE_CASE =param_name @classmethod def A ( cls : Optional[int] ) -> Tuple: '''simple docstring''' if cls.NAMING_INFO is not None: return _SCREAMING_SNAKE_CASE ={ 'short_word': {}, 'reverse_short_word': {}, 'short_param': {}, 'reverse_short_param': {}, } _SCREAMING_SNAKE_CASE =list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(_a , _a ) _SCREAMING_SNAKE_CASE =info @classmethod def A ( cls : List[Any] , _a : int ) -> int: '''simple docstring''' cls.build_naming_info() assert cls.PREFIX is not None _SCREAMING_SNAKE_CASE =[copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(f"You should provide a default value for the param name {k} with value {v}" ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue _SCREAMING_SNAKE_CASE =cls.NAMING_INFO['short_param'][k] if isinstance(_a , _a ): _SCREAMING_SNAKE_CASE =1 if v else 0 _SCREAMING_SNAKE_CASE ='' if isinstance(_a , (int, float) ) else '-' _SCREAMING_SNAKE_CASE =f"{key}{sep}{v}" name.append(_a ) return "_".join(_a ) @classmethod def A ( cls : Optional[Any] , _a : List[Any] ) -> Dict: '''simple docstring''' _SCREAMING_SNAKE_CASE =repr[len(cls.PREFIX ) + 1 :] if repr == "": _SCREAMING_SNAKE_CASE =[] else: _SCREAMING_SNAKE_CASE =repr.split('_' ) _SCREAMING_SNAKE_CASE ={} for value in values: if "-" in value: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =value.split('-' ) else: _SCREAMING_SNAKE_CASE =re.sub('[0-9.]' , '' , _a ) _SCREAMING_SNAKE_CASE =float(re.sub('[^0-9.]' , '' , _a ) ) _SCREAMING_SNAKE_CASE =cls.NAMING_INFO['reverse_short_param'][p_k] _SCREAMING_SNAKE_CASE =p_v for k in cls.DEFAULTS: if k not in parameters: _SCREAMING_SNAKE_CASE =cls.DEFAULTS[k] return parameters
47
0
import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings UpperCAmelCase__ : List[Any] = r"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n" @add_start_docstrings(A__ ) class a__ ( A__ ): """simple docstring""" UpperCAmelCase__ : Optional[int] ="""rag""" UpperCAmelCase__ : Union[str, Any] =True def __init__( self : Optional[Any] , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Optional[Any]=" / " , UpperCAmelCase__ : str=" // " , UpperCAmelCase__ : Tuple=5 , UpperCAmelCase__ : Optional[int]=3_0_0 , UpperCAmelCase__ : Optional[Any]=7_6_8 , UpperCAmelCase__ : Union[str, Any]=8 , UpperCAmelCase__ : Dict="wiki_dpr" , UpperCAmelCase__ : Tuple="train" , UpperCAmelCase__ : Any="compressed" , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : Optional[int]=False , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : str=0.0 , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Optional[int]=False , UpperCAmelCase__ : Optional[int]=False , UpperCAmelCase__ : int=False , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Union[str, Any]=None , **UpperCAmelCase__ : List[str] , ) ->str: """simple docstring""" super().__init__( bos_token_id=_a , pad_token_id=_a , eos_token_id=_a , decoder_start_token_id=_a , forced_eos_token_id=_a , is_encoder_decoder=_a , prefix=_a , vocab_size=_a , **_a , ) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" SCREAMING_SNAKE_CASE : Optional[int] = kwargs.pop("""question_encoder""" ) SCREAMING_SNAKE_CASE : List[str] = question_encoder_config.pop("""model_type""" ) SCREAMING_SNAKE_CASE : List[Any] = kwargs.pop("""generator""" ) SCREAMING_SNAKE_CASE : Optional[Any] = decoder_config.pop("""model_type""" ) from ..auto.configuration_auto import AutoConfig SCREAMING_SNAKE_CASE : List[str] = AutoConfig.for_model(_a , **_a ) SCREAMING_SNAKE_CASE : Any = AutoConfig.for_model(_a , **_a ) SCREAMING_SNAKE_CASE : Union[str, Any] = reduce_loss SCREAMING_SNAKE_CASE : List[Any] = label_smoothing SCREAMING_SNAKE_CASE : List[Any] = exclude_bos_score SCREAMING_SNAKE_CASE : str = do_marginalize SCREAMING_SNAKE_CASE : Union[str, Any] = title_sep SCREAMING_SNAKE_CASE : Optional[int] = doc_sep SCREAMING_SNAKE_CASE : Dict = n_docs SCREAMING_SNAKE_CASE : str = max_combined_length SCREAMING_SNAKE_CASE : List[str] = dataset SCREAMING_SNAKE_CASE : int = dataset_split SCREAMING_SNAKE_CASE : List[str] = index_name SCREAMING_SNAKE_CASE : int = retrieval_vector_size SCREAMING_SNAKE_CASE : Optional[Any] = retrieval_batch_size SCREAMING_SNAKE_CASE : int = passages_path SCREAMING_SNAKE_CASE : Any = index_path SCREAMING_SNAKE_CASE : List[Any] = use_dummy_dataset SCREAMING_SNAKE_CASE : Union[str, Any] = output_retrieved SCREAMING_SNAKE_CASE : Tuple = do_deduplication SCREAMING_SNAKE_CASE : Any = use_cache if self.forced_eos_token_id is None: SCREAMING_SNAKE_CASE : List[str] = getattr(self.generator , """forced_eos_token_id""" , _a ) @classmethod def _lowercase ( cls : List[str] , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : PretrainedConfig , **UpperCAmelCase__ : Tuple ) ->PretrainedConfig: """simple docstring""" return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **_a ) def _lowercase ( self : int ) ->List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : Dict = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.question_encoder.to_dict() SCREAMING_SNAKE_CASE : List[Any] = self.generator.to_dict() SCREAMING_SNAKE_CASE : List[Any] = self.__class__.model_type return output
245
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowerCamelCase : Optional[int] = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt") @dataclass class A__ : A__ = field( default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} ) A__ = field( default=A__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} ) A__ = field( default=A__ , metadata={'help': 'The column name of the images in the files.'} ) A__ = field(default=A__ , metadata={'help': 'A folder containing the training data.'} ) A__ = field(default=A__ , metadata={'help': 'A folder containing the validation data.'} ) A__ = field( default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} ) A__ = field( default=A__ , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) } , ) A__ = field( default=A__ , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' ) } , ) def A ( self : Union[str, Any] ) -> int: '''simple docstring''' _SCREAMING_SNAKE_CASE ={} if self.train_dir is not None: _SCREAMING_SNAKE_CASE =self.train_dir if self.validation_dir is not None: _SCREAMING_SNAKE_CASE =self.validation_dir _SCREAMING_SNAKE_CASE =data_files if data_files else None @dataclass class A__ : A__ = field( default=A__ , metadata={ 'help': ( 'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.' ) } , ) A__ = field( default=A__ , metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'} ) A__ = field( default=A__ , metadata={ 'help': ( 'Override some existing default config settings when a model is trained from scratch. Example: ' 'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index' ) } , ) A__ = field( default=A__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} ) A__ = field( default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , ) A__ = field(default=A__ , metadata={'help': 'Name or path of preprocessor config.'} ) A__ = field( default=A__ , metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' ) } , ) A__ = field( default=0.75 , metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'} ) A__ = field( default=A__ , metadata={'help': 'Whether or not to train with normalized pixel values as target.'} ) @dataclass class A__ ( A__ ): A__ = field( default=1E-3 , metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'} ) def _lowerCAmelCase ( _UpperCamelCase : int ) -> Tuple: """simple docstring""" _SCREAMING_SNAKE_CASE =torch.stack([example['pixel_values'] for example in examples] ) return {"pixel_values": pixel_values} def _lowerCAmelCase ( ) -> Dict: """simple docstring""" _SCREAMING_SNAKE_CASE =HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_mae' , _UpperCamelCase , _UpperCamelCase ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _SCREAMING_SNAKE_CASE =training_args.get_process_log_level() logger.setLevel(_UpperCamelCase ) transformers.utils.logging.set_verbosity(_UpperCamelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(f"Training/evaluation parameters {training_args}" ) # Detecting last checkpoint. _SCREAMING_SNAKE_CASE =None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _SCREAMING_SNAKE_CASE =get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Initialize our dataset. _SCREAMING_SNAKE_CASE =load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. _SCREAMING_SNAKE_CASE =None if 'validation' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , _UpperCamelCase ) and data_args.train_val_split > 0.0: _SCREAMING_SNAKE_CASE =ds['train'].train_test_split(data_args.train_val_split ) _SCREAMING_SNAKE_CASE =split['train'] _SCREAMING_SNAKE_CASE =split['test'] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _SCREAMING_SNAKE_CASE ={ 'cache_dir': model_args.cache_dir, 'revision': model_args.model_revision, 'use_auth_token': True if model_args.use_auth_token else None, } if model_args.config_name: _SCREAMING_SNAKE_CASE =ViTMAEConfig.from_pretrained(model_args.config_name , **_UpperCamelCase ) elif model_args.model_name_or_path: _SCREAMING_SNAKE_CASE =ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **_UpperCamelCase ) else: _SCREAMING_SNAKE_CASE =ViTMAEConfig() logger.warning('You are instantiating a new config instance from scratch.' ) if model_args.config_overrides is not None: logger.info(f"Overriding config: {model_args.config_overrides}" ) config.update_from_string(model_args.config_overrides ) logger.info(f"New config: {config}" ) # adapt config config.update( { 'mask_ratio': model_args.mask_ratio, 'norm_pix_loss': model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: _SCREAMING_SNAKE_CASE =ViTImageProcessor.from_pretrained(model_args.image_processor_name , **_UpperCamelCase ) elif model_args.model_name_or_path: _SCREAMING_SNAKE_CASE =ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **_UpperCamelCase ) else: _SCREAMING_SNAKE_CASE =ViTImageProcessor() # create model if model_args.model_name_or_path: _SCREAMING_SNAKE_CASE =ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('Training new model from scratch' ) _SCREAMING_SNAKE_CASE =ViTMAEForPreTraining(_UpperCamelCase ) if training_args.do_train: _SCREAMING_SNAKE_CASE =ds['train'].column_names else: _SCREAMING_SNAKE_CASE =ds['validation'].column_names if data_args.image_column_name is not None: _SCREAMING_SNAKE_CASE =data_args.image_column_name elif "image" in column_names: _SCREAMING_SNAKE_CASE ='image' elif "img" in column_names: _SCREAMING_SNAKE_CASE ='img' else: _SCREAMING_SNAKE_CASE =column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: _SCREAMING_SNAKE_CASE =image_processor.size['shortest_edge'] else: _SCREAMING_SNAKE_CASE =(image_processor.size['height'], image_processor.size['width']) _SCREAMING_SNAKE_CASE =Compose( [ Lambda(lambda _UpperCamelCase : img.convert('RGB' ) if img.mode != "RGB" else img ), RandomResizedCrop(_UpperCamelCase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(_UpperCamelCase : Dict ): _SCREAMING_SNAKE_CASE =[transforms(_UpperCamelCase ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError('--do_train requires a train dataset' ) if data_args.max_train_samples is not None: _SCREAMING_SNAKE_CASE =ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(_UpperCamelCase ) if training_args.do_eval: if "validation" not in ds: raise ValueError('--do_eval requires a validation dataset' ) if data_args.max_eval_samples is not None: _SCREAMING_SNAKE_CASE =( ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(_UpperCamelCase ) # Compute absolute learning rate _SCREAMING_SNAKE_CASE =( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: _SCREAMING_SNAKE_CASE =training_args.base_learning_rate * total_train_batch_size / 2_56 # Initialize our trainer _SCREAMING_SNAKE_CASE =Trainer( model=_UpperCamelCase , args=_UpperCamelCase , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=_UpperCamelCase , data_collator=_UpperCamelCase , ) # Training if training_args.do_train: _SCREAMING_SNAKE_CASE =None if training_args.resume_from_checkpoint is not None: _SCREAMING_SNAKE_CASE =training_args.resume_from_checkpoint elif last_checkpoint is not None: _SCREAMING_SNAKE_CASE =last_checkpoint _SCREAMING_SNAKE_CASE =trainer.train(resume_from_checkpoint=_UpperCamelCase ) trainer.save_model() trainer.log_metrics('train' , train_result.metrics ) trainer.save_metrics('train' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _SCREAMING_SNAKE_CASE =trainer.evaluate() trainer.log_metrics('eval' , _UpperCamelCase ) trainer.save_metrics('eval' , _UpperCamelCase ) # Write model card and (optionally) push to hub _SCREAMING_SNAKE_CASE ={ 'tasks': 'masked-auto-encoding', 'dataset': data_args.dataset_name, 'tags': ['masked-auto-encoding'], } if training_args.push_to_hub: trainer.push_to_hub(**_UpperCamelCase ) else: trainer.create_model_card(**_UpperCamelCase ) def _lowerCAmelCase ( _UpperCamelCase : List[str] ) -> Optional[int]: """simple docstring""" main() if __name__ == "__main__": main()
47
0
import warnings from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __lowerCAmelCase ( A__ ): snake_case_ : int = ["image_processor", "tokenizer"] snake_case_ : List[Any] = "FlavaImageProcessor" snake_case_ : Union[str, Any] = ("BertTokenizer", "BertTokenizerFast") def __init__( self : Optional[Any] , snake_case__ : Tuple=None , snake_case__ : Optional[Any]=None , **snake_case__ : List[str] ): """simple docstring""" _UpperCAmelCase = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , _a , ) _UpperCAmelCase = kwargs.pop("feature_extractor" ) _UpperCAmelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(_a , _a ) _UpperCAmelCase = self.image_processor def __call__( self : List[str] , snake_case__ : Optional[ImageInput] = None , snake_case__ : Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , snake_case__ : bool = True , snake_case__ : Union[bool, str, PaddingStrategy] = False , snake_case__ : Union[bool, str, TruncationStrategy] = False , snake_case__ : Optional[int] = None , snake_case__ : int = 0 , snake_case__ : Optional[int] = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[bool] = None , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = True , snake_case__ : Optional[Union[str, TensorType]] = None , **snake_case__ : str , ): """simple docstring""" if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none." ) if text is not None: _UpperCAmelCase = self.tokenizer( text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , ) if images is not None: _UpperCAmelCase = self.image_processor( _a , return_image_mask=_a , return_codebook_pixels=_a , return_tensors=_a , **_a , ) if text is not None and images is not None: encoding.update(_a ) return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_a ) , tensor_type=_a ) def UpperCamelCase ( self : Dict , *snake_case__ : Optional[Any] , **snake_case__ : Tuple ): """simple docstring""" return self.tokenizer.batch_decode(*_a , **_a ) def UpperCamelCase ( self : List[str] , *snake_case__ : Optional[Any] , **snake_case__ : List[Any] ): """simple docstring""" return self.tokenizer.decode(*_a , **_a ) @property def UpperCamelCase ( self : Any ): """simple docstring""" _UpperCAmelCase = self.tokenizer.model_input_names _UpperCAmelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def UpperCamelCase ( self : Optional[int] ): """simple docstring""" warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _a , ) return self.image_processor_class @property def UpperCamelCase ( self : Optional[Any] ): """simple docstring""" warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _a , ) return self.image_processor
133
'''simple docstring''' from typing import Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin class A__ ( A__ , A__ ): @register_to_config def __init__( self : Dict , _a : int = 768 , ) -> Union[str, Any]: '''simple docstring''' super().__init__() _SCREAMING_SNAKE_CASE =nn.Parameter(torch.zeros(1 , _a ) ) _SCREAMING_SNAKE_CASE =nn.Parameter(torch.ones(1 , _a ) ) def A ( self : Tuple , _a : Optional[Union[str, torch.device]] = None , _a : Optional[torch.dtype] = None , ) -> List[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =nn.Parameter(self.mean.to(_a ).to(_a ) ) _SCREAMING_SNAKE_CASE =nn.Parameter(self.std.to(_a ).to(_a ) ) return self def A ( self : Tuple , _a : str ) -> str: '''simple docstring''' _SCREAMING_SNAKE_CASE =(embeds - self.mean) * 1.0 / self.std return embeds def A ( self : List[str] , _a : Optional[Any] ) -> Tuple: '''simple docstring''' _SCREAMING_SNAKE_CASE =(embeds * self.std) + self.mean return embeds
47
0
import glob import os import random from string import ascii_lowercase, digits import cva UpperCAmelCase_ = "" UpperCAmelCase_ = "" UpperCAmelCase_ = "" UpperCAmelCase_ = 1 # (0 is vertical, 1 is horizontal) def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase, __lowerCamelCase = get_dataset(_UpperCamelCase , _UpperCamelCase ) print("""Processing...""" ) __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = update_image_and_anno(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) for index, image in enumerate(_UpperCamelCase ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' __lowerCamelCase = random_chars(32 ) __lowerCamelCase = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0] __lowerCamelCase = f'{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}' cva.imwrite(f'/{file_root}.jpg' , _UpperCamelCase , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f'Success {index+1}/{len(_UpperCamelCase )} with {file_name}' ) __lowerCamelCase = [] for anno in new_annos[index]: __lowerCamelCase = f'{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}' annos_list.append(_UpperCamelCase ) with open(f'/{file_root}.txt' , """w""" ) as outfile: outfile.write("""\n""".join(line for line in annos_list ) ) def lowerCamelCase__ ( A__ : str , A__ : str ): '''simple docstring''' __lowerCamelCase = [] __lowerCamelCase = [] for label_file in glob.glob(os.path.join(_UpperCamelCase , """*.txt""" ) ): __lowerCamelCase = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0] with open(_UpperCamelCase ) as in_file: __lowerCamelCase = in_file.readlines() __lowerCamelCase = os.path.join(_UpperCamelCase , f'{label_name}.jpg' ) __lowerCamelCase = [] for obj_list in obj_lists: __lowerCamelCase = obj_list.rstrip("""\n""" ).split(""" """ ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(_UpperCamelCase ) labels.append(_UpperCamelCase ) return img_paths, labels def lowerCamelCase__ ( A__ : list , A__ : list , A__ : int = 1 ): '''simple docstring''' __lowerCamelCase = [] __lowerCamelCase = [] __lowerCamelCase = [] for idx in range(len(_UpperCamelCase ) ): __lowerCamelCase = [] __lowerCamelCase = img_list[idx] path_list.append(_UpperCamelCase ) __lowerCamelCase = anno_list[idx] __lowerCamelCase = cva.imread(_UpperCamelCase ) if flip_type == 1: __lowerCamelCase = cva.flip(_UpperCamelCase , _UpperCamelCase ) for bbox in img_annos: __lowerCamelCase = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: __lowerCamelCase = cva.flip(_UpperCamelCase , _UpperCamelCase ) for bbox in img_annos: __lowerCamelCase = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(_UpperCamelCase ) new_imgs_list.append(_UpperCamelCase ) return new_imgs_list, new_annos_lists, path_list def lowerCamelCase__ ( A__ : int = 32 ): '''simple docstring''' assert number_char > 1, "The number of character should greater than 1" __lowerCamelCase = ascii_lowercase + digits return "".join(random.choice(_UpperCamelCase ) for _ in range(_UpperCamelCase ) ) if __name__ == "__main__": main() print('DONE ✅')
12
'''simple docstring''' # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType from ...utils.imports import is_botoa_available from .config_args import SageMakerConfig from .config_utils import ( DYNAMO_BACKENDS, _ask_field, _ask_options, _convert_dynamo_backend, _convert_mixed_precision, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool, ) if is_botoa_available(): import botoa # noqa: F401 def _lowerCAmelCase ( _UpperCamelCase : Optional[int] ) -> List[str]: """simple docstring""" _SCREAMING_SNAKE_CASE =botoa.client('iam' ) _SCREAMING_SNAKE_CASE ={ 'Version': '2012-10-17', 'Statement': [ {'Effect': 'Allow', 'Principal': {'Service': 'sagemaker.amazonaws.com'}, 'Action': 'sts:AssumeRole'} ], } try: # create the role, associated with the chosen trust policy iam_client.create_role( RoleName=_UpperCamelCase , AssumeRolePolicyDocument=json.dumps(_UpperCamelCase , indent=2 ) ) _SCREAMING_SNAKE_CASE ={ 'Version': '2012-10-17', 'Statement': [ { 'Effect': 'Allow', 'Action': [ 'sagemaker:*', 'ecr:GetDownloadUrlForLayer', 'ecr:BatchGetImage', 'ecr:BatchCheckLayerAvailability', 'ecr:GetAuthorizationToken', 'cloudwatch:PutMetricData', 'cloudwatch:GetMetricData', 'cloudwatch:GetMetricStatistics', 'cloudwatch:ListMetrics', 'logs:CreateLogGroup', 'logs:CreateLogStream', 'logs:DescribeLogStreams', 'logs:PutLogEvents', 'logs:GetLogEvents', 's3:CreateBucket', 's3:ListBucket', 's3:GetBucketLocation', 's3:GetObject', 's3:PutObject', ], 'Resource': '*', } ], } # attach policy to role iam_client.put_role_policy( RoleName=_UpperCamelCase , PolicyName=f"{role_name}_policy_permission" , PolicyDocument=json.dumps(_UpperCamelCase , indent=2 ) , ) except iam_client.exceptions.EntityAlreadyExistsException: print(f"role {role_name} already exists. Using existing one" ) def _lowerCAmelCase ( _UpperCamelCase : List[str] ) -> Optional[int]: """simple docstring""" _SCREAMING_SNAKE_CASE =botoa.client('iam' ) return iam_client.get_role(RoleName=_UpperCamelCase )["Role"]["Arn"] def _lowerCAmelCase ( ) -> Optional[int]: """simple docstring""" _SCREAMING_SNAKE_CASE =_ask_options( 'How do you want to authorize?' , ['AWS Profile', 'Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '] , _UpperCamelCase , ) _SCREAMING_SNAKE_CASE =None if credentials_configuration == 0: _SCREAMING_SNAKE_CASE =_ask_field('Enter your AWS Profile name: [default] ' , default='default' ) _SCREAMING_SNAKE_CASE =aws_profile else: print( 'Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,' '`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`' ) _SCREAMING_SNAKE_CASE =_ask_field('AWS Access Key ID: ' ) _SCREAMING_SNAKE_CASE =aws_access_key_id _SCREAMING_SNAKE_CASE =_ask_field('AWS Secret Access Key: ' ) _SCREAMING_SNAKE_CASE =aws_secret_access_key _SCREAMING_SNAKE_CASE =_ask_field('Enter your AWS Region: [us-east-1]' , default='us-east-1' ) _SCREAMING_SNAKE_CASE =aws_region _SCREAMING_SNAKE_CASE =_ask_options( 'Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?' , ['Provide IAM Role name', 'Create new IAM role using credentials'] , _UpperCamelCase , ) if role_management == 0: _SCREAMING_SNAKE_CASE =_ask_field('Enter your IAM role name: ' ) else: _SCREAMING_SNAKE_CASE ='accelerate_sagemaker_execution_role' print(f"Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials" ) _create_iam_role_for_sagemaker(_UpperCamelCase ) _SCREAMING_SNAKE_CASE =_ask_field( 'Do you want to use custom Docker image? [yes/NO]: ' , _convert_yes_no_to_bool , default=_UpperCamelCase , error_message='Please enter yes or no.' , ) _SCREAMING_SNAKE_CASE =None if is_custom_docker_image: _SCREAMING_SNAKE_CASE =_ask_field('Enter your Docker image: ' , lambda _UpperCamelCase : str(_UpperCamelCase ).lower() ) _SCREAMING_SNAKE_CASE =_ask_field( 'Do you want to provide SageMaker input channels with data locations? [yes/NO]: ' , _convert_yes_no_to_bool , default=_UpperCamelCase , error_message='Please enter yes or no.' , ) _SCREAMING_SNAKE_CASE =None if is_sagemaker_inputs_enabled: _SCREAMING_SNAKE_CASE =_ask_field( 'Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ' , lambda _UpperCamelCase : str(_UpperCamelCase ).lower() , ) _SCREAMING_SNAKE_CASE =_ask_field( 'Do you want to enable SageMaker metrics? [yes/NO]: ' , _convert_yes_no_to_bool , default=_UpperCamelCase , error_message='Please enter yes or no.' , ) _SCREAMING_SNAKE_CASE =None if is_sagemaker_metrics_enabled: _SCREAMING_SNAKE_CASE =_ask_field( 'Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ' , lambda _UpperCamelCase : str(_UpperCamelCase ).lower() , ) _SCREAMING_SNAKE_CASE =_ask_options( 'What is the distributed mode?' , ['No distributed training', 'Data parallelism'] , _convert_sagemaker_distributed_mode , ) _SCREAMING_SNAKE_CASE ={} _SCREAMING_SNAKE_CASE =_ask_field( 'Do you wish to optimize your script with torch dynamo?[yes/NO]:' , _convert_yes_no_to_bool , default=_UpperCamelCase , error_message='Please enter yes or no.' , ) if use_dynamo: _SCREAMING_SNAKE_CASE ='dynamo_' _SCREAMING_SNAKE_CASE =_ask_options( 'Which dynamo backend would you like to use?' , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , ) _SCREAMING_SNAKE_CASE =_ask_field( 'Do you want to customize the defaults sent to torch.compile? [yes/NO]: ' , _convert_yes_no_to_bool , default=_UpperCamelCase , error_message='Please enter yes or no.' , ) if use_custom_options: _SCREAMING_SNAKE_CASE =_ask_options( 'Which mode do you want to use?' , _UpperCamelCase , lambda _UpperCamelCase : TORCH_DYNAMO_MODES[int(_UpperCamelCase )] , default='default' , ) _SCREAMING_SNAKE_CASE =_ask_field( 'Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ' , _convert_yes_no_to_bool , default=_UpperCamelCase , error_message='Please enter yes or no.' , ) _SCREAMING_SNAKE_CASE =_ask_field( 'Do you want to enable dynamic shape tracing? [yes/NO]: ' , _convert_yes_no_to_bool , default=_UpperCamelCase , error_message='Please enter yes or no.' , ) _SCREAMING_SNAKE_CASE ='Which EC2 instance type you want to use for your training?' if distributed_type != SageMakerDistributedType.NO: _SCREAMING_SNAKE_CASE =_ask_options( _UpperCamelCase , _UpperCamelCase , lambda _UpperCamelCase : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(_UpperCamelCase )] ) else: eca_instance_query += "? [ml.p3.2xlarge]:" _SCREAMING_SNAKE_CASE =_ask_field(_UpperCamelCase , lambda _UpperCamelCase : str(_UpperCamelCase ).lower() , default='ml.p3.2xlarge' ) _SCREAMING_SNAKE_CASE =1 if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL): _SCREAMING_SNAKE_CASE =_ask_field( 'How many machines do you want use? [1]: ' , _UpperCamelCase , default=1 , ) _SCREAMING_SNAKE_CASE =_ask_options( 'Do you wish to use FP16 or BF16 (mixed precision)?' , ['no', 'fp16', 'bf16', 'fp8'] , _convert_mixed_precision , ) if use_dynamo and mixed_precision == "no": print( 'Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.' ) return SageMakerConfig( image_uri=_UpperCamelCase , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=_UpperCamelCase , use_cpu=_UpperCamelCase , dynamo_config=_UpperCamelCase , eca_instance_type=_UpperCamelCase , profile=_UpperCamelCase , region=_UpperCamelCase , iam_role_name=_UpperCamelCase , mixed_precision=_UpperCamelCase , num_machines=_UpperCamelCase , sagemaker_inputs_file=_UpperCamelCase , sagemaker_metrics_file=_UpperCamelCase , )
47
0
import torch from transformers import AutoModel class __lowercase ( torch.nn.Module ): """simple docstring""" def __init__( self : List[Any] , lowerCAmelCase__ : Union[str, Any]="sayef/fsner-bert-base-uncased"): super(_a , self).__init__() SCREAMING_SNAKE_CASE_: str = AutoModel.from_pretrained(_a , return_dict=_a) SCREAMING_SNAKE_CASE_: Optional[Any] = torch.nn.CosineSimilarity(3 , 1E-08) SCREAMING_SNAKE_CASE_: List[str] = torch.nn.Softmax(dim=1) def _SCREAMING_SNAKE_CASE ( self : int , **lowerCAmelCase__ : str): return self.bert(**_a).last_hidden_state def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : int): return token_embeddings.sum(2 , keepdim=_a) def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Dict=1): return self.softmax(T * self.cos(_a , _a)) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[Any]): SCREAMING_SNAKE_CASE_: str = W_supports["sizes"].tolist() SCREAMING_SNAKE_CASE_: Any = W_supports["start_token_id"].item() SCREAMING_SNAKE_CASE_: Optional[Any] = W_supports["end_token_id"].item() del W_supports["sizes"] del W_supports["start_token_id"] del W_supports["end_token_id"] SCREAMING_SNAKE_CASE_: str = self.BERT(**_a) SCREAMING_SNAKE_CASE_: List[Any] = self.BERT(**_a) SCREAMING_SNAKE_CASE_: str = None SCREAMING_SNAKE_CASE_: Optional[Any] = None SCREAMING_SNAKE_CASE_: int = W_supports["input_ids"] == start_token_id SCREAMING_SNAKE_CASE_: List[Any] = W_supports["input_ids"] == end_token_id for i, size in enumerate(_a): if i == 0: SCREAMING_SNAKE_CASE_: Any = 0 else: SCREAMING_SNAKE_CASE_: List[Any] = support_sizes[i - 1] SCREAMING_SNAKE_CASE_: Optional[Any] = S[s : s + size][start_token_masks[s : s + size]] SCREAMING_SNAKE_CASE_: str = S[s : s + size][end_token_masks[s : s + size]] SCREAMING_SNAKE_CASE_: Any = torch.matmul(q[i] , s_start.T).sum(1).softmax(0) SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.matmul(q[i] , s_end.T).sum(1).softmax(0) if p_starts is not None: SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.vstack((p_starts, p_start)) SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.vstack((p_ends, p_end)) else: SCREAMING_SNAKE_CASE_: List[Any] = p_start SCREAMING_SNAKE_CASE_: str = p_end return p_starts, p_ends
13
'''simple docstring''' class A__ : def __init__( self : Union[str, Any] , _a : int ) -> None: '''simple docstring''' _SCREAMING_SNAKE_CASE =size _SCREAMING_SNAKE_CASE =[0] * size _SCREAMING_SNAKE_CASE =[0] * size @staticmethod def A ( _a : int ) -> int: '''simple docstring''' return index | (index + 1) @staticmethod def A ( _a : int ) -> int: '''simple docstring''' return (index & (index + 1)) - 1 def A ( self : Tuple , _a : int , _a : int ) -> None: '''simple docstring''' _SCREAMING_SNAKE_CASE =value while index < self.size: _SCREAMING_SNAKE_CASE =self.get_prev(_a ) + 1 if current_left_border == index: _SCREAMING_SNAKE_CASE =value else: _SCREAMING_SNAKE_CASE =max(_a , _a , _a ) _SCREAMING_SNAKE_CASE =self.get_next(_a ) def A ( self : int , _a : int , _a : int ) -> int: '''simple docstring''' right -= 1 # Because of right is exclusive _SCREAMING_SNAKE_CASE =0 while left <= right: _SCREAMING_SNAKE_CASE =self.get_prev(_a ) if left <= current_left: _SCREAMING_SNAKE_CASE =max(_a , self.tree[right] ) _SCREAMING_SNAKE_CASE =current_left else: _SCREAMING_SNAKE_CASE =max(_a , self.arr[right] ) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
47
0
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging snake_case__ : List[Any] = logging.get_logger(__name__) if is_vision_available(): import PIL class snake_case_( A__ ): __UpperCamelCase = ['''pixel_values'''] def __init__( self : int , UpperCamelCase_ : bool = True , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase_ : bool = True , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : bool = True , UpperCamelCase_ : Union[int, float] = 1 / 2_5_5 , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : bool = True , **UpperCamelCase_ : Any , ): super().__init__(**_a ) lowerCAmelCase : str = size if size is not None else {'''shortest_edge''': 2_2_4} lowerCAmelCase : Dict = get_size_dict(_a , default_to_square=_a ) lowerCAmelCase : str = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4} lowerCAmelCase : Optional[Any] = get_size_dict(_a , default_to_square=_a , param_name='''crop_size''' ) lowerCAmelCase : List[str] = do_resize lowerCAmelCase : int = size lowerCAmelCase : Union[str, Any] = resample lowerCAmelCase : List[str] = do_center_crop lowerCAmelCase : int = crop_size lowerCAmelCase : Tuple = do_rescale lowerCAmelCase : Dict = rescale_factor lowerCAmelCase : List[Any] = do_normalize lowerCAmelCase : str = image_mean if image_mean is not None else OPENAI_CLIP_MEAN lowerCAmelCase : Union[str, Any] = image_std if image_std is not None else OPENAI_CLIP_STD lowerCAmelCase : Optional[int] = do_convert_rgb def lowerCamelCase__ ( self : int , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Dict[str, int] , UpperCamelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : str , ): lowerCAmelCase : str = get_size_dict(_a , default_to_square=_a ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) lowerCAmelCase : int = get_resize_output_image_size(_a , size=size['''shortest_edge'''] , default_to_square=_a ) return resize(_a , size=_a , resample=_a , data_format=_a , **_a ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Dict[str, int] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Optional[Any] , ): lowerCAmelCase : List[str] = get_size_dict(_a ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' ) return center_crop(_a , size=(size['''height'''], size['''width''']) , data_format=_a , **_a ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Union[int, float] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : str , ): return rescale(_a , scale=_a , data_format=_a , **_a ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Union[float, List[float]] , UpperCamelCase_ : Union[float, List[float]] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Optional[Any] , ): return normalize(_a , mean=_a , std=_a , data_format=_a , **_a ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : ImageInput , UpperCamelCase_ : bool = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : PILImageResampling = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : int = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : float = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , UpperCamelCase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase_ : Optional[Any] , ): lowerCAmelCase : Optional[int] = do_resize if do_resize is not None else self.do_resize lowerCAmelCase : Tuple = size if size is not None else self.size lowerCAmelCase : Tuple = get_size_dict(_a , param_name='''size''' , default_to_square=_a ) lowerCAmelCase : Dict = resample if resample is not None else self.resample lowerCAmelCase : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop lowerCAmelCase : str = crop_size if crop_size is not None else self.crop_size lowerCAmelCase : int = get_size_dict(_a , param_name='''crop_size''' , default_to_square=_a ) lowerCAmelCase : Tuple = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase : Any = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCAmelCase : List[str] = do_normalize if do_normalize is not None else self.do_normalize lowerCAmelCase : int = image_mean if image_mean is not None else self.image_mean lowerCAmelCase : Union[str, Any] = image_std if image_std is not None else self.image_std lowerCAmelCase : Tuple = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb lowerCAmelCase : Any = make_list_of_images(_a ) if not valid_images(_a ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: lowerCAmelCase : Union[str, Any] = [convert_to_rgb(_a ) for image in images] # All transformations expect numpy arrays. lowerCAmelCase : Dict = [to_numpy_array(_a ) for image in images] if do_resize: lowerCAmelCase : Any = [self.resize(image=_a , size=_a , resample=_a ) for image in images] if do_center_crop: lowerCAmelCase : Dict = [self.center_crop(image=_a , size=_a ) for image in images] if do_rescale: lowerCAmelCase : str = [self.rescale(image=_a , scale=_a ) for image in images] if do_normalize: lowerCAmelCase : Dict = [self.normalize(image=_a , mean=_a , std=_a ) for image in images] lowerCAmelCase : Dict = [to_channel_dimension_format(_a , _a ) for image in images] lowerCAmelCase : int = {'''pixel_values''': images} return BatchFeature(data=_a , tensor_type=_a )
60
'''simple docstring''' from __future__ import annotations from random import random from typing import Generic, TypeVar lowerCamelCase : Union[str, Any] = TypeVar("KT") lowerCamelCase : Dict = TypeVar("VT") class A__ ( Generic[KT, VT] ): def __init__( self : str , _a : KT | str = "root" , _a : VT | None = None ) -> Dict: '''simple docstring''' _SCREAMING_SNAKE_CASE =key _SCREAMING_SNAKE_CASE =value _SCREAMING_SNAKE_CASE =[] def __repr__( self : Union[str, Any] ) -> str: '''simple docstring''' return f"Node({self.key}: {self.value})" @property def A ( self : int ) -> int: '''simple docstring''' return len(self.forward ) class A__ ( Generic[KT, VT] ): def __init__( self : Optional[Any] , _a : float = 0.5 , _a : int = 16 ) -> str: '''simple docstring''' _SCREAMING_SNAKE_CASE =Node[KT, VT]() _SCREAMING_SNAKE_CASE =0 _SCREAMING_SNAKE_CASE =p _SCREAMING_SNAKE_CASE =max_level def __str__( self : Tuple ) -> str: '''simple docstring''' _SCREAMING_SNAKE_CASE =list(self ) if len(_a ) == 0: return f"SkipList(level={self.level})" _SCREAMING_SNAKE_CASE =max((len(str(_a ) ) for item in items) , default=4 ) _SCREAMING_SNAKE_CASE =max(_a , 4 ) + 4 _SCREAMING_SNAKE_CASE =self.head _SCREAMING_SNAKE_CASE =[] _SCREAMING_SNAKE_CASE =node.forward.copy() lines.append(f"[{node.key}]".ljust(_a , '-' ) + '* ' * len(_a ) ) lines.append(' ' * label_size + '| ' * len(_a ) ) while len(node.forward ) != 0: _SCREAMING_SNAKE_CASE =node.forward[0] lines.append( f"[{node.key}]".ljust(_a , '-' ) + ' '.join(str(n.key ) if n.key == node.key else '|' for n in forwards ) ) lines.append(' ' * label_size + '| ' * len(_a ) ) _SCREAMING_SNAKE_CASE =node.forward lines.append('None'.ljust(_a ) + '* ' * len(_a ) ) return f"SkipList(level={self.level})\n" + "\n".join(_a ) def __iter__( self : Dict ) -> Optional[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.head while len(node.forward ) != 0: yield node.forward[0].key _SCREAMING_SNAKE_CASE =node.forward[0] def A ( self : List[Any] ) -> int: '''simple docstring''' _SCREAMING_SNAKE_CASE =1 while random() < self.p and level < self.max_level: level += 1 return level def A ( self : Any , _a : Any ) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]: '''simple docstring''' _SCREAMING_SNAKE_CASE =[] _SCREAMING_SNAKE_CASE =self.head for i in reversed(range(self.level ) ): # i < node.level - When node level is lesser than `i` decrement `i`. # node.forward[i].key < key - Jumping to node with key value higher # or equal to searched key would result # in skipping searched key. while i < node.level and node.forward[i].key < key: _SCREAMING_SNAKE_CASE =node.forward[i] # Each leftmost node (relative to searched node) will potentially have to # be updated. update_vector.append(_a ) update_vector.reverse() # Note that we were inserting values in reverse order. # len(node.forward) != 0 - If current node doesn't contain any further # references then searched key is not present. # node.forward[0].key == key - Next node key should be equal to search key # if key is present. if len(node.forward ) != 0 and node.forward[0].key == key: return node.forward[0], update_vector else: return None, update_vector def A ( self : Union[str, Any] , _a : KT ) -> int: '''simple docstring''' _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self._locate_node(_a ) if node is not None: for i, update_node in enumerate(_a ): # Remove or replace all references to removed node. if update_node.level > i and update_node.forward[i].key == key: if node.level > i: _SCREAMING_SNAKE_CASE =node.forward[i] else: _SCREAMING_SNAKE_CASE =update_node.forward[:i] def A ( self : Optional[Any] , _a : KT , _a : VT ) -> str: '''simple docstring''' _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self._locate_node(_a ) if node is not None: _SCREAMING_SNAKE_CASE =value else: _SCREAMING_SNAKE_CASE =self.random_level() if level > self.level: # After level increase we have to add additional nodes to head. for _ in range(self.level - 1 , _a ): update_vector.append(self.head ) _SCREAMING_SNAKE_CASE =level _SCREAMING_SNAKE_CASE =Node(_a , _a ) for i, update_node in enumerate(update_vector[:level] ): # Change references to pass through new node. if update_node.level > i: new_node.forward.append(update_node.forward[i] ) if update_node.level < i + 1: update_node.forward.append(_a ) else: _SCREAMING_SNAKE_CASE =new_node def A ( self : List[str] , _a : VT ) -> VT | None: '''simple docstring''' _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self._locate_node(_a ) if node is not None: return node.value return None def _lowerCAmelCase ( ) -> str: """simple docstring""" _SCREAMING_SNAKE_CASE =SkipList() skip_list.insert('Key1' , 3 ) skip_list.insert('Key2' , 12 ) skip_list.insert('Key3' , 41 ) skip_list.insert('Key4' , -19 ) _SCREAMING_SNAKE_CASE =skip_list.head _SCREAMING_SNAKE_CASE ={} while node.level != 0: _SCREAMING_SNAKE_CASE =node.forward[0] _SCREAMING_SNAKE_CASE =node.value assert len(_UpperCamelCase ) == 4 assert all_values["Key1"] == 3 assert all_values["Key2"] == 12 assert all_values["Key3"] == 41 assert all_values["Key4"] == -19 def _lowerCAmelCase ( ) -> Optional[int]: """simple docstring""" _SCREAMING_SNAKE_CASE =SkipList() skip_list.insert('Key1' , 10 ) skip_list.insert('Key1' , 12 ) skip_list.insert('Key5' , 7 ) skip_list.insert('Key7' , 10 ) skip_list.insert('Key10' , 5 ) skip_list.insert('Key7' , 7 ) skip_list.insert('Key5' , 5 ) skip_list.insert('Key10' , 10 ) _SCREAMING_SNAKE_CASE =skip_list.head _SCREAMING_SNAKE_CASE ={} while node.level != 0: _SCREAMING_SNAKE_CASE =node.forward[0] _SCREAMING_SNAKE_CASE =node.value if len(_UpperCamelCase ) != 4: print() assert len(_UpperCamelCase ) == 4 assert all_values["Key1"] == 12 assert all_values["Key7"] == 7 assert all_values["Key5"] == 5 assert all_values["Key10"] == 10 def _lowerCAmelCase ( ) -> Tuple: """simple docstring""" _SCREAMING_SNAKE_CASE =SkipList() assert skip_list.find('Some key' ) is None def _lowerCAmelCase ( ) -> str: """simple docstring""" _SCREAMING_SNAKE_CASE =SkipList() skip_list.insert('Key2' , 20 ) assert skip_list.find('Key2' ) == 20 skip_list.insert('Some Key' , 10 ) skip_list.insert('Key2' , 8 ) skip_list.insert('V' , 13 ) assert skip_list.find('Y' ) is None assert skip_list.find('Key2' ) == 8 assert skip_list.find('Some Key' ) == 10 assert skip_list.find('V' ) == 13 def _lowerCAmelCase ( ) -> List[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE =SkipList() skip_list.delete('Some key' ) assert len(skip_list.head.forward ) == 0 def _lowerCAmelCase ( ) -> Optional[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE =SkipList() skip_list.insert('Key1' , 12 ) skip_list.insert('V' , 13 ) skip_list.insert('X' , 14 ) skip_list.insert('Key2' , 15 ) skip_list.delete('V' ) skip_list.delete('Key2' ) assert skip_list.find('V' ) is None assert skip_list.find('Key2' ) is None def _lowerCAmelCase ( ) -> List[str]: """simple docstring""" _SCREAMING_SNAKE_CASE =SkipList() skip_list.insert('Key1' , 12 ) skip_list.insert('V' , 13 ) skip_list.insert('X' , 14 ) skip_list.insert('Key2' , 15 ) skip_list.delete('V' ) assert skip_list.find('V' ) is None assert skip_list.find('X' ) == 14 assert skip_list.find('Key1' ) == 12 assert skip_list.find('Key2' ) == 15 skip_list.delete('X' ) assert skip_list.find('V' ) is None assert skip_list.find('X' ) is None assert skip_list.find('Key1' ) == 12 assert skip_list.find('Key2' ) == 15 skip_list.delete('Key1' ) assert skip_list.find('V' ) is None assert skip_list.find('X' ) is None assert skip_list.find('Key1' ) is None assert skip_list.find('Key2' ) == 15 skip_list.delete('Key2' ) assert skip_list.find('V' ) is None assert skip_list.find('X' ) is None assert skip_list.find('Key1' ) is None assert skip_list.find('Key2' ) is None def _lowerCAmelCase ( ) -> Dict: """simple docstring""" _SCREAMING_SNAKE_CASE =SkipList() skip_list.insert('Key1' , 12 ) skip_list.insert('V' , 13 ) skip_list.insert('X' , 1_42 ) skip_list.insert('Key2' , 15 ) skip_list.delete('X' ) def traverse_keys(_UpperCamelCase : Dict ): yield node.key for forward_node in node.forward: yield from traverse_keys(_UpperCamelCase ) assert len(set(traverse_keys(skip_list.head ) ) ) == 4 def _lowerCAmelCase ( ) -> Union[str, Any]: """simple docstring""" def is_sorted(_UpperCamelCase : str ): return all(next_item >= item for item, next_item in zip(_UpperCamelCase , lst[1:] ) ) _SCREAMING_SNAKE_CASE =SkipList() for i in range(10 ): skip_list.insert(_UpperCamelCase , _UpperCamelCase ) assert is_sorted(list(_UpperCamelCase ) ) skip_list.delete(5 ) skip_list.delete(8 ) skip_list.delete(2 ) assert is_sorted(list(_UpperCamelCase ) ) skip_list.insert(-12 , -12 ) skip_list.insert(77 , 77 ) assert is_sorted(list(_UpperCamelCase ) ) def _lowerCAmelCase ( ) -> List[str]: """simple docstring""" for _ in range(1_00 ): # Repeat test 100 times due to the probabilistic nature of skip list # random values == random bugs test_insert() test_insert_overrides_existing_value() test_searching_empty_list_returns_none() test_search() test_deleting_item_from_empty_list_do_nothing() test_deleted_items_are_not_founded_by_find_method() test_delete_removes_only_given_key() test_delete_doesnt_leave_dead_nodes() test_iter_always_yields_sorted_values() def _lowerCAmelCase ( ) -> Dict: """simple docstring""" _SCREAMING_SNAKE_CASE =SkipList() skip_list.insert(2 , '2' ) skip_list.insert(4 , '4' ) skip_list.insert(6 , '4' ) skip_list.insert(4 , '5' ) skip_list.insert(8 , '4' ) skip_list.insert(9 , '4' ) skip_list.delete(4 ) print(_UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod() main()
47
0
"""simple docstring""" import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} lowerCamelCase_ = { "vocab_file": { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json" ), }, } lowerCamelCase_ = { "vocab_file": { "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json" ), }, } lowerCamelCase_ = { "vocab_file": { "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json" ), }, } lowerCamelCase_ = { "facebook/dpr-ctx_encoder-single-nq-base": 5_1_2, "facebook/dpr-ctx_encoder-multiset-base": 5_1_2, } lowerCamelCase_ = { "facebook/dpr-question_encoder-single-nq-base": 5_1_2, "facebook/dpr-question_encoder-multiset-base": 5_1_2, } lowerCamelCase_ = { "facebook/dpr-reader-single-nq-base": 5_1_2, "facebook/dpr-reader-multiset-base": 5_1_2, } lowerCamelCase_ = { "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True}, "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True}, } lowerCamelCase_ = { "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True}, "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True}, } lowerCamelCase_ = { "facebook/dpr-reader-single-nq-base": {"do_lower_case": True}, "facebook/dpr-reader-multiset-base": {"do_lower_case": True}, } class _SCREAMING_SNAKE_CASE( A__ ): SCREAMING_SNAKE_CASE_ : List[Any] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ : Optional[int] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ : Tuple = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ : int = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE_ : int = DPRContextEncoderTokenizer class _SCREAMING_SNAKE_CASE( A__ ): SCREAMING_SNAKE_CASE_ : List[Any] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ : Tuple = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ : Any = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ : Optional[int] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE_ : List[Any] = DPRQuestionEncoderTokenizer lowerCamelCase_ = collections.namedtuple( "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"] ) lowerCamelCase_ = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"]) lowerCamelCase_ = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n " @add_start_docstrings(A__ ) class _SCREAMING_SNAKE_CASE: def __call__( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = None ,SCREAMING_SNAKE_CASE__ = None ,SCREAMING_SNAKE_CASE__ = False ,SCREAMING_SNAKE_CASE__ = False ,SCREAMING_SNAKE_CASE__ = None ,SCREAMING_SNAKE_CASE__ = None ,SCREAMING_SNAKE_CASE__ = None ,**SCREAMING_SNAKE_CASE__ ,) -> BatchEncoding: """simple docstring""" if titles is None and texts is None: return super().__call__( _a ,padding=_a ,truncation=_a ,max_length=_a ,return_tensors=_a ,return_attention_mask=_a ,**_a ,) elif titles is None or texts is None: __SCREAMING_SNAKE_CASE :Tuple = titles if texts is None else texts return super().__call__( _a ,_a ,padding=_a ,truncation=_a ,max_length=_a ,return_tensors=_a ,return_attention_mask=_a ,**_a ,) __SCREAMING_SNAKE_CASE :List[Any] = titles if not isinstance(_a ,_a ) else [titles] __SCREAMING_SNAKE_CASE :Optional[Any] = texts if not isinstance(_a ,_a ) else [texts] __SCREAMING_SNAKE_CASE :int = len(_a ) __SCREAMING_SNAKE_CASE :List[Any] = questions if not isinstance(_a ,_a ) else [questions] * n_passages assert len(_a ) == len( _a ), f'''There should be as many titles than texts but got {len(_a )} titles and {len(_a )} texts.''' __SCREAMING_SNAKE_CASE :Optional[Any] = super().__call__(_a ,_a ,padding=_a ,truncation=_a )['''input_ids'''] __SCREAMING_SNAKE_CASE :Union[str, Any] = super().__call__(_a ,add_special_tokens=_a ,padding=_a ,truncation=_a )['''input_ids'''] __SCREAMING_SNAKE_CASE :str = { '''input_ids''': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(_a ,_a ) ] } if return_attention_mask is not False: __SCREAMING_SNAKE_CASE :List[Any] = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) __SCREAMING_SNAKE_CASE :Optional[int] = attention_mask return self.pad(_a ,padding=_a ,max_length=_a ,return_tensors=_a ) def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = 16 ,SCREAMING_SNAKE_CASE__ = 64 ,SCREAMING_SNAKE_CASE__ = 4 ,) -> List[DPRSpanPrediction]: """simple docstring""" __SCREAMING_SNAKE_CASE :List[str] = reader_input['''input_ids'''] __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Union[str, Any] = reader_output[:3] __SCREAMING_SNAKE_CASE :List[Any] = len(_a ) __SCREAMING_SNAKE_CASE :Optional[int] = sorted(range(_a ) ,reverse=_a ,key=relevance_logits.__getitem__ ) __SCREAMING_SNAKE_CASE :str = [] for doc_id in sorted_docs: __SCREAMING_SNAKE_CASE :Dict = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence __SCREAMING_SNAKE_CASE :Optional[Any] = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: __SCREAMING_SNAKE_CASE :Optional[Any] = sequence_ids.index(self.pad_token_id ) else: __SCREAMING_SNAKE_CASE :List[str] = len(_a ) __SCREAMING_SNAKE_CASE :List[Any] = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=_a ,top_spans=_a ,) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=_a ,start_index=_a ,end_index=_a ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) ) if len(_a ) >= num_spans: break return nbest_spans_predictions[:num_spans] def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,) -> List[DPRSpanPrediction]: """simple docstring""" __SCREAMING_SNAKE_CASE :Optional[Any] = [] for start_index, start_score in enumerate(_a ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) __SCREAMING_SNAKE_CASE :List[Any] = sorted(_a ,key=lambda SCREAMING_SNAKE_CASE__ : x[1] ,reverse=_a ) __SCREAMING_SNAKE_CASE :List[Any] = [] for (start_index, end_index), score in scores: assert start_index <= end_index, f'''Wrong span indices: [{start_index}:{end_index}]''' __SCREAMING_SNAKE_CASE :Optional[Any] = end_index - start_index + 1 assert length <= max_answer_length, f'''Span is too long: {length} > {max_answer_length}''' if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(_a ) == top_spans: break return chosen_span_intervals @add_end_docstrings(A__ ) class _SCREAMING_SNAKE_CASE( A__ , A__ ): SCREAMING_SNAKE_CASE_ : int = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ : Dict = READER_PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ : Optional[int] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ : Optional[int] = READER_PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE_ : Optional[Any] = ['''input_ids''', '''attention_mask'''] SCREAMING_SNAKE_CASE_ : Optional[Any] = DPRReaderTokenizer
191
'''simple docstring''' import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version lowerCamelCase : List[Any] = version.parse(importlib_metadata.version("nltk")) if NLTK_VERSION >= version.Version("3.6.4"): from nltk import word_tokenize lowerCamelCase : Any = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n" lowerCamelCase : Optional[Any] = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n" lowerCamelCase : Optional[Any] = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A__ ( datasets.Metric ): def A ( self : Tuple ) -> str: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[ 'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score', 'https://en.wikipedia.org/wiki/METEOR', ] , ) def A ( self : Union[str, Any] , _a : Union[str, Any] ) -> Optional[int]: '''simple docstring''' import nltk nltk.download('wordnet' ) if NLTK_VERSION >= version.Version('3.6.5' ): nltk.download('punkt' ) if NLTK_VERSION >= version.Version('3.6.6' ): nltk.download('omw-1.4' ) def A ( self : int , _a : Tuple , _a : List[str] , _a : List[str]=0.9 , _a : Dict=3 , _a : Optional[int]=0.5 ) -> Optional[int]: '''simple docstring''' if NLTK_VERSION >= version.Version('3.6.5' ): _SCREAMING_SNAKE_CASE =[ meteor_score.single_meteor_score( word_tokenize(_a ) , word_tokenize(_a ) , alpha=_a , beta=_a , gamma=_a ) for ref, pred in zip(_a , _a ) ] else: _SCREAMING_SNAKE_CASE =[ meteor_score.single_meteor_score(_a , _a , alpha=_a , beta=_a , gamma=_a ) for ref, pred in zip(_a , _a ) ] return {"meteor": np.mean(_a )}
47
0
import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase : Optional[int] = logging.get_logger(__name__) lowerCAmelCase : Optional[int] = {"vocab_file": "vocab.txt"} lowerCAmelCase : Dict = { "vocab_file": { "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt", }, } lowerCAmelCase : int = { "openbmb/cpm-ant-10b": 10_24, } def A_ ( a ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = collections.OrderedDict() with open(_UpperCamelCase , 'r' , encoding='utf-8' ) as reader: SCREAMING_SNAKE_CASE_ : Optional[Any] = reader.readlines() for index, token in enumerate(_UpperCamelCase ): SCREAMING_SNAKE_CASE_ : List[Any] = token.rstrip('\n' ) SCREAMING_SNAKE_CASE_ : List[str] = index return vocab class _A ( A__): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE=200 ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = vocab SCREAMING_SNAKE_CASE_ : List[str] = unk_token SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_input_chars_per_word def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = list(_a ) if len(_a ) > self.max_input_chars_per_word: return [self.unk_token] SCREAMING_SNAKE_CASE_ : Tuple = 0 SCREAMING_SNAKE_CASE_ : Dict = [] while start < len(_a ): SCREAMING_SNAKE_CASE_ : Tuple = len(_a ) SCREAMING_SNAKE_CASE_ : Dict = None while start < end: SCREAMING_SNAKE_CASE_ : List[Any] = ''.join(chars[start:end] ) if substr in self.vocab: SCREAMING_SNAKE_CASE_ : int = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(_a ) SCREAMING_SNAKE_CASE_ : Optional[Any] = end return sub_tokens class _A ( A__): SCREAMING_SNAKE_CASE : Any = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE : Optional[int] = ['''input_ids''', '''attention_mask'''] SCREAMING_SNAKE_CASE : Dict = False def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="<d>" , _SCREAMING_SNAKE_CASE="</d>" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="</n>" , _SCREAMING_SNAKE_CASE="</_>" , _SCREAMING_SNAKE_CASE="left" , **_SCREAMING_SNAKE_CASE , ): """simple docstring""" requires_backends(self , ['jieba'] ) super().__init__( bod_token=_a , eod_token=_a , bos_token=_a , eos_token=_a , pad_token=_a , unk_token=_a , line_token=_a , space_token=_a , padding_side=_a , **_a , ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = bod_token SCREAMING_SNAKE_CASE_ : Optional[Any] = eod_token SCREAMING_SNAKE_CASE_ : List[Any] = load_vocab(_a ) SCREAMING_SNAKE_CASE_ : List[Any] = self.encoder[space_token] SCREAMING_SNAKE_CASE_ : Optional[Any] = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] SCREAMING_SNAKE_CASE_ : Any = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _SCREAMING_SNAKE_CASE : x[1] ) ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = {v: k for k, v in self.encoder.items()} SCREAMING_SNAKE_CASE_ : str = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token ) @property def UpperCAmelCase ( self ): """simple docstring""" return self.encoder[self.bod_token] @property def UpperCAmelCase ( self ): """simple docstring""" return self.encoder[self.eod_token] @property def UpperCAmelCase ( self ): """simple docstring""" return self.encoder["\n"] @property def UpperCAmelCase ( self ): """simple docstring""" return len(self.encoder ) def UpperCAmelCase ( self ): """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = [] for x in jieba.cut(_a , cut_all=_a ): output_tokens.extend(self.wordpiece_tokenizer.tokenize(_a ) ) return output_tokens def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = [i for i in token_ids if i >= 0] SCREAMING_SNAKE_CASE_ : List[str] = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(_a , **_a ) def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ): """simple docstring""" return token in self.encoder def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ): """simple docstring""" return "".join(_a ) def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ): """simple docstring""" return self.encoder.get(_a , self.encoder.get(self.unk_token ) ) def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ): """simple docstring""" return self.decoder.get(_a , self.unk_token ) def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ): """simple docstring""" if os.path.isdir(_a ): SCREAMING_SNAKE_CASE_ : Optional[int] = os.path.join( _a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) else: SCREAMING_SNAKE_CASE_ : Tuple = (filename_prefix + '-' if filename_prefix else '') + save_directory SCREAMING_SNAKE_CASE_ : Optional[Any] = 0 if " " in self.encoder: SCREAMING_SNAKE_CASE_ : Tuple = self.encoder[' '] del self.encoder[" "] if "\n" in self.encoder: SCREAMING_SNAKE_CASE_ : str = self.encoder['\n'] del self.encoder["\n"] SCREAMING_SNAKE_CASE_ : Tuple = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _SCREAMING_SNAKE_CASE : x[1] ) ) with open(_a , 'w' , encoding='utf-8' ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." ' Please check that the vocabulary is not corrupted!' ) SCREAMING_SNAKE_CASE_ : List[str] = token_index writer.write(token + '\n' ) index += 1 return (vocab_file,) def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ): """simple docstring""" if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a ) if token_ids_a is not None: return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) return [1] + ([0] * len(_a ))
253
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCamelCase : List[str] = logging.get_logger(__name__) lowerCamelCase : List[Any] = { "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json", } class A__ ( A__ , A__ ): A__ = 'convnextv2' def __init__( self : Tuple , _a : Optional[int]=3 , _a : Any=4 , _a : int=4 , _a : Union[str, Any]=None , _a : List[str]=None , _a : Optional[Any]="gelu" , _a : Any=0.02 , _a : Any=1e-12 , _a : Tuple=0.0 , _a : int=224 , _a : Any=None , _a : Optional[int]=None , **_a : List[str] , ) -> Optional[Any]: '''simple docstring''' super().__init__(**_a ) _SCREAMING_SNAKE_CASE =num_channels _SCREAMING_SNAKE_CASE =patch_size _SCREAMING_SNAKE_CASE =num_stages _SCREAMING_SNAKE_CASE =[96, 192, 384, 768] if hidden_sizes is None else hidden_sizes _SCREAMING_SNAKE_CASE =[3, 3, 9, 3] if depths is None else depths _SCREAMING_SNAKE_CASE =hidden_act _SCREAMING_SNAKE_CASE =initializer_range _SCREAMING_SNAKE_CASE =layer_norm_eps _SCREAMING_SNAKE_CASE =drop_path_rate _SCREAMING_SNAKE_CASE =image_size _SCREAMING_SNAKE_CASE =['stem'] + [f"stage{idx}" for idx in range(1 , len(self.depths ) + 1 )] _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =get_aligned_output_features_output_indices( out_features=_a , out_indices=_a , stage_names=self.stage_names )
47
0
import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) _lowerCAmelCase : Any = logging.getLogger(__name__) _lowerCAmelCase : Optional[Any] = tf.data.AUTOTUNE def lowerCAmelCase ( ): """simple docstring""" UpperCAmelCase__ = argparse.ArgumentParser(description="Train a masked language model on TPU." ) parser.add_argument( "--pretrained_model_config" , type=_UpperCamelCase , default="roberta-base" , help="The model config to use. Note that we don\'t copy the model\'s weights, only the config!" , ) parser.add_argument( "--tokenizer" , type=_UpperCamelCase , default="unigram-tokenizer-wikitext" , help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size." , ) parser.add_argument( "--per_replica_batch_size" , type=_UpperCamelCase , default=8 , help="Batch size per TPU core." , ) parser.add_argument( "--no_tpu" , action="store_true" , help="If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances." , ) parser.add_argument( "--tpu_name" , type=_UpperCamelCase , help="Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs." , default="local" , ) parser.add_argument( "--tpu_zone" , type=_UpperCamelCase , help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes." , ) parser.add_argument( "--gcp_project" , type=_UpperCamelCase , help="Google cloud project name. Only used for non-Colab TPU nodes." ) parser.add_argument( "--bfloat16" , action="store_true" , help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU." , ) parser.add_argument( "--train_dataset" , type=_UpperCamelCase , help="Path to training dataset to load. If the path begins with `gs://`" " then the dataset will be loaded from a Google Cloud Storage bucket." , ) parser.add_argument( "--shuffle_buffer_size" , type=_UpperCamelCase , default=2**18 , help="Size of the shuffle buffer (in samples)" , ) parser.add_argument( "--eval_dataset" , type=_UpperCamelCase , help="Path to evaluation dataset to load. If the path begins with `gs://`" " then the dataset will be loaded from a Google Cloud Storage bucket." , ) parser.add_argument( "--num_epochs" , type=_UpperCamelCase , default=1 , help="Number of epochs to train for." , ) parser.add_argument( "--learning_rate" , type=_UpperCamelCase , default=1E-4 , help="Learning rate to use for training." , ) parser.add_argument( "--weight_decay_rate" , type=_UpperCamelCase , default=1E-3 , help="Weight decay rate to use for training." , ) parser.add_argument( "--max_length" , type=_UpperCamelCase , default=512 , help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py" , ) parser.add_argument( "--mlm_probability" , type=_UpperCamelCase , default=0.15 , help="Fraction of tokens to mask during training." , ) parser.add_argument("--output_dir" , type=_UpperCamelCase , required=_UpperCamelCase , help="Path to save model checkpoints to." ) parser.add_argument("--hub_model_id" , type=_UpperCamelCase , help="Model ID to upload to on the Hugging Face Hub." ) UpperCAmelCase__ = parser.parse_args() return args def lowerCAmelCase ( _lowerCAmelCase : Any ): """simple docstring""" try: if args.tpu_name: UpperCAmelCase__ = tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name , zone=args.tpu_zone , project=args.gcp_project ) else: UpperCAmelCase__ = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( "Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or " "--gcp_project. When running on a TPU VM, use --tpu_name local." ) tf.config.experimental_connect_to_cluster(_UpperCamelCase ) tf.tpu.experimental.initialize_tpu_system(_UpperCamelCase ) return tpu def lowerCAmelCase ( _lowerCAmelCase : List[Any] ): """simple docstring""" UpperCAmelCase__ = 0 for file in file_list: UpperCAmelCase__ = file.split("/" )[-1] UpperCAmelCase__ = re.search(R"-\d+-(\d+)\.tfrecord" , _UpperCamelCase ).group(1 ) UpperCAmelCase__ = int(_UpperCamelCase ) num_samples += sample_count return num_samples def lowerCAmelCase ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : str=None ): """simple docstring""" UpperCAmelCase__ = count_samples(_UpperCamelCase ) UpperCAmelCase__ = tf.data.Dataset.from_tensor_slices(_UpperCamelCase ) if shuffle: UpperCAmelCase__ = dataset.shuffle(len(_UpperCamelCase ) ) UpperCAmelCase__ = tf.data.TFRecordDataset(_UpperCamelCase , num_parallel_reads=_UpperCamelCase ) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here UpperCAmelCase__ = dataset.apply(tf.data.experimental.assert_cardinality(_UpperCamelCase ) ) UpperCAmelCase__ = dataset.map(_UpperCamelCase , num_parallel_calls=_UpperCamelCase ) if shuffle: assert shuffle_buffer_size is not None UpperCAmelCase__ = dataset.shuffle(args.shuffle_buffer_size ) UpperCAmelCase__ = dataset.batch(_UpperCamelCase , drop_remainder=_UpperCamelCase ) UpperCAmelCase__ = dataset.map(_UpperCamelCase , num_parallel_calls=_UpperCamelCase ) UpperCAmelCase__ = dataset.prefetch(_UpperCamelCase ) return dataset def lowerCAmelCase ( _lowerCAmelCase : Union[str, Any] ): """simple docstring""" if not args.no_tpu: UpperCAmelCase__ = initialize_tpu(_UpperCamelCase ) UpperCAmelCase__ = tf.distribute.TPUStrategy(_UpperCamelCase ) else: UpperCAmelCase__ = tf.distribute.OneDeviceStrategy(device="/gpu:0" ) if args.bfloataa: tf.keras.mixed_precision.set_global_policy("mixed_bfloat16" ) UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer ) UpperCAmelCase__ = AutoConfig.from_pretrained(args.pretrained_model_config ) UpperCAmelCase__ = tokenizer.vocab_size UpperCAmelCase__ = tf.io.gfile.glob(os.path.join(args.train_dataset , "*.tfrecord" ) ) if not training_records: raise ValueError(F'''No .tfrecord files found in {args.train_dataset}.''' ) UpperCAmelCase__ = tf.io.gfile.glob(os.path.join(args.eval_dataset , "*.tfrecord" ) ) if not eval_records: raise ValueError(F'''No .tfrecord files found in {args.eval_dataset}.''' ) UpperCAmelCase__ = count_samples(_UpperCamelCase ) UpperCAmelCase__ = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) UpperCAmelCase__ = steps_per_epoch * args.num_epochs with strategy.scope(): UpperCAmelCase__ = TFAutoModelForMaskedLM.from_config(_UpperCamelCase ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built UpperCAmelCase__ , UpperCAmelCase__ = create_optimizer( num_train_steps=_UpperCamelCase , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). model.compile(optimizer=_UpperCamelCase , metrics=["accuracy"] ) def decode_fn(_lowerCAmelCase : Any ): UpperCAmelCase__ = { "input_ids": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), "attention_mask": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), } return tf.io.parse_single_example(_UpperCamelCase , _UpperCamelCase ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. UpperCAmelCase__ = DataCollatorForLanguageModeling( tokenizer=_UpperCamelCase , mlm_probability=args.mlm_probability , mlm=_UpperCamelCase , return_tensors="tf" ) def mask_with_collator(_lowerCAmelCase : List[str] ): # TF really needs an isin() function UpperCAmelCase__ = ( ~tf.cast(batch["attention_mask"] , tf.bool ) | (batch["input_ids"] == tokenizer.cls_token_id) | (batch["input_ids"] == tokenizer.sep_token_id) ) UpperCAmelCase__ , UpperCAmelCase__ = data_collator.tf_mask_tokens( batch["input_ids"] , vocab_size=len(_UpperCamelCase ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=_UpperCamelCase , ) return batch UpperCAmelCase__ = args.per_replica_batch_size * strategy.num_replicas_in_sync UpperCAmelCase__ = prepare_dataset( _UpperCamelCase , decode_fn=_UpperCamelCase , mask_fn=_UpperCamelCase , batch_size=_UpperCamelCase , shuffle=_UpperCamelCase , shuffle_buffer_size=args.shuffle_buffer_size , ) UpperCAmelCase__ = prepare_dataset( _UpperCamelCase , decode_fn=_UpperCamelCase , mask_fn=_UpperCamelCase , batch_size=_UpperCamelCase , shuffle=_UpperCamelCase , ) UpperCAmelCase__ = [] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=_UpperCamelCase ) ) model.fit( _UpperCamelCase , validation_data=_UpperCamelCase , epochs=args.num_epochs , callbacks=_UpperCamelCase , ) model.save_pretrained(args.output_dir ) if __name__ == "__main__": _lowerCAmelCase : Union[str, Any] = parse_args() main(args)
169
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowerCamelCase : int = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : int = ["GLPNFeatureExtractor"] lowerCamelCase : Optional[int] = ["GLPNImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Union[str, Any] = [ "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST", "GLPNForDepthEstimation", "GLPNLayer", "GLPNModel", "GLPNPreTrainedModel", ] if TYPE_CHECKING: from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_glpn import GLPNFeatureExtractor from .image_processing_glpn import GLPNImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_glpn import ( GLPN_PRETRAINED_MODEL_ARCHIVE_LIST, GLPNForDepthEstimation, GLPNLayer, GLPNModel, GLPNPreTrainedModel, ) else: import sys lowerCamelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
47
0
def UpperCAmelCase_ ( __snake_case = 50 ) -> int: """simple docstring""" _lowercase =[1] * (length + 1) for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): ways_number[row_length] += ways_number[ row_length - tile_start - tile_length ] return ways_number[length] if __name__ == "__main__": print(f'''{solution() = }''')
5
'''simple docstring''' lowerCamelCase : Any = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n" lowerCamelCase : int = [{"type": "code", "content": INSTALL_CONTENT}] lowerCamelCase : str = { "{processor_class}": "FakeProcessorClass", "{model_class}": "FakeModelClass", "{object_class}": "FakeObjectClass", }
47
0
'''simple docstring''' from collections.abc import Callable import numpy as np def _a( UpperCamelCase__ : Callable, UpperCamelCase__ : float, UpperCamelCase__ : float, UpperCamelCase__ : float, UpperCamelCase__ : float ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] =int(np.ceil((x_end - xa) / step_size ) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =np.zeros((n + 1,) ) SCREAMING_SNAKE_CASE__ : Dict =ya SCREAMING_SNAKE_CASE__ : Optional[int] =xa for k in range(_UpperCamelCase ): SCREAMING_SNAKE_CASE__ : List[str] =y[k] + step_size * ode_func(_UpperCamelCase, y[k] ) SCREAMING_SNAKE_CASE__ : Dict =y[k] + ( (step_size / 2) * (ode_func(_UpperCamelCase, y[k] ) + ode_func(x + step_size, _UpperCamelCase )) ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
152
'''simple docstring''' import unittest import numpy as np import torch from diffusers import VersatileDiffusionImageVariationPipeline from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device lowerCamelCase : Optional[int] = False class A__ ( unittest.TestCase ): pass @slow @require_torch_gpu class A__ ( unittest.TestCase ): def A ( self : Tuple ) -> Dict: '''simple docstring''' _SCREAMING_SNAKE_CASE =VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion' ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) _SCREAMING_SNAKE_CASE =load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) _SCREAMING_SNAKE_CASE =torch.manual_seed(0 ) _SCREAMING_SNAKE_CASE =pipe( image=_a , generator=_a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images _SCREAMING_SNAKE_CASE =image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _SCREAMING_SNAKE_CASE =np.array([0.04_41, 0.04_69, 0.05_07, 0.05_75, 0.06_32, 0.06_50, 0.08_65, 0.09_09, 0.09_45] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
47
0
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class UpperCAmelCase_ ( unittest.TestCase): def _UpperCAmelCase ( self ) -> Any: super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _UpperCAmelCase ( self ) -> str: lowercase__ : Any = 1 lowercase__ : str = 3 lowercase__ : Dict = (3_2, 3_2) lowercase__ : Any = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_a ) return image @property def _UpperCAmelCase ( self ) -> int: torch.manual_seed(0 ) lowercase__ : List[Any] = UNetaDConditionModel( block_out_channels=(3_2, 3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=7 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , attention_head_dim=8 , use_linear_projection=_a , only_cross_attention=(True, True, False) , num_class_embeds=1_0_0 , ) return model @property def _UpperCAmelCase ( self ) -> List[Any]: torch.manual_seed(0 ) lowercase__ : Dict = AutoencoderKL( block_out_channels=[3_2, 3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) return model @property def _UpperCAmelCase ( self ) -> Union[str, Any]: torch.manual_seed(0 ) lowercase__ : Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , ) return CLIPTextModel(_a ) def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator lowercase__ : Optional[Any] = self.dummy_cond_unet_upscale lowercase__ : Tuple = DDPMScheduler() lowercase__ : List[Any] = DDIMScheduler(prediction_type='v_prediction' ) lowercase__ : Tuple = self.dummy_vae lowercase__ : Union[str, Any] = self.dummy_text_encoder lowercase__ : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) lowercase__ : str = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] lowercase__ : Tuple = Image.fromarray(np.uinta(_a ) ).convert('RGB' ).resize((6_4, 6_4) ) # make sure here that pndm scheduler skips prk lowercase__ : Any = StableDiffusionUpscalePipeline( unet=_a , low_res_scheduler=_a , scheduler=_a , vae=_a , text_encoder=_a , tokenizer=_a , max_noise_level=3_5_0 , ) lowercase__ : Dict = sd_pipe.to(_a ) sd_pipe.set_progress_bar_config(disable=_a ) lowercase__ : Any = 'A painting of a squirrel eating a burger' lowercase__ : List[str] = torch.Generator(device=_a ).manual_seed(0 ) lowercase__ : Optional[int] = sd_pipe( [prompt] , image=_a , generator=_a , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type='np' , ) lowercase__ : List[str] = output.images lowercase__ : int = torch.Generator(device=_a ).manual_seed(0 ) lowercase__ : List[Any] = sd_pipe( [prompt] , image=_a , generator=_a , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type='np' , return_dict=_a , )[0] lowercase__ : List[Any] = image[0, -3:, -3:, -1] lowercase__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1] lowercase__ : Tuple = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) lowercase__ : Dict = np.array([0.3_113, 0.3_910, 0.4_272, 0.4_859, 0.5_061, 0.4_652, 0.5_362, 0.5_715, 0.5_661] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def _UpperCAmelCase ( self ) -> str: lowercase__ : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator lowercase__ : Dict = self.dummy_cond_unet_upscale lowercase__ : Dict = DDPMScheduler() lowercase__ : List[Any] = DDIMScheduler(prediction_type='v_prediction' ) lowercase__ : Optional[Any] = self.dummy_vae lowercase__ : str = self.dummy_text_encoder lowercase__ : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) lowercase__ : Any = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] lowercase__ : List[Any] = Image.fromarray(np.uinta(_a ) ).convert('RGB' ).resize((6_4, 6_4) ) # make sure here that pndm scheduler skips prk lowercase__ : List[str] = StableDiffusionUpscalePipeline( unet=_a , low_res_scheduler=_a , scheduler=_a , vae=_a , text_encoder=_a , tokenizer=_a , max_noise_level=3_5_0 , ) lowercase__ : Dict = sd_pipe.to(_a ) sd_pipe.set_progress_bar_config(disable=_a ) lowercase__ : int = 'A painting of a squirrel eating a burger' lowercase__ : int = sd_pipe( 2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type='np' , ) lowercase__ : Tuple = output.images assert image.shape[0] == 2 lowercase__ : Dict = torch.Generator(device=_a ).manual_seed(0 ) lowercase__ : List[Any] = sd_pipe( [prompt] , image=_a , generator=_a , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type='np' , ) lowercase__ : str = output.images assert image.shape[0] == 2 @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' ) def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : List[str] = self.dummy_cond_unet_upscale lowercase__ : Dict = DDPMScheduler() lowercase__ : Optional[Any] = DDIMScheduler(prediction_type='v_prediction' ) lowercase__ : Union[str, Any] = self.dummy_vae lowercase__ : Optional[Any] = self.dummy_text_encoder lowercase__ : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) lowercase__ : Optional[int] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] lowercase__ : List[Any] = Image.fromarray(np.uinta(_a ) ).convert('RGB' ).resize((6_4, 6_4) ) # put models in fp16, except vae as it overflows in fp16 lowercase__ : Optional[int] = unet.half() lowercase__ : List[Any] = text_encoder.half() # make sure here that pndm scheduler skips prk lowercase__ : Any = StableDiffusionUpscalePipeline( unet=_a , low_res_scheduler=_a , scheduler=_a , vae=_a , text_encoder=_a , tokenizer=_a , max_noise_level=3_5_0 , ) lowercase__ : Optional[Any] = sd_pipe.to(_a ) sd_pipe.set_progress_bar_config(disable=_a ) lowercase__ : Any = 'A painting of a squirrel eating a burger' lowercase__ : str = torch.manual_seed(0 ) lowercase__ : Union[str, Any] = sd_pipe( [prompt] , image=_a , generator=_a , num_inference_steps=2 , output_type='np' , ).images lowercase__ : Union[str, Any] = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) @slow @require_torch_gpu class UpperCAmelCase_ ( unittest.TestCase): def _UpperCAmelCase ( self ) -> Tuple: super().tearDown() gc.collect() torch.cuda.empty_cache() def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ : str = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-upscale/low_res_cat.png' ) lowercase__ : Optional[Any] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale' '/upsampled_cat.npy' ) lowercase__ : Dict = 'stabilityai/stable-diffusion-x4-upscaler' lowercase__ : Dict = StableDiffusionUpscalePipeline.from_pretrained(_a ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) pipe.enable_attention_slicing() lowercase__ : Optional[Any] = 'a cat sitting on a park bench' lowercase__ : Tuple = torch.manual_seed(0 ) lowercase__ : Any = pipe( prompt=_a , image=_a , generator=_a , output_type='np' , ) lowercase__ : Optional[Any] = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 1e-3 def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ : Any = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-upscale/low_res_cat.png' ) lowercase__ : Optional[Any] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale' '/upsampled_cat_fp16.npy' ) lowercase__ : str = 'stabilityai/stable-diffusion-x4-upscaler' lowercase__ : int = StableDiffusionUpscalePipeline.from_pretrained( _a , torch_dtype=torch.floataa , ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) pipe.enable_attention_slicing() lowercase__ : str = 'a cat sitting on a park bench' lowercase__ : List[Any] = torch.manual_seed(0 ) lowercase__ : Union[str, Any] = pipe( prompt=_a , image=_a , generator=_a , output_type='np' , ) lowercase__ : Any = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 5e-1 def _UpperCAmelCase ( self ) -> Dict: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowercase__ : Any = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-upscale/low_res_cat.png' ) lowercase__ : Dict = 'stabilityai/stable-diffusion-x4-upscaler' lowercase__ : List[Any] = StableDiffusionUpscalePipeline.from_pretrained( _a , torch_dtype=torch.floataa , ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() lowercase__ : List[Any] = 'a cat sitting on a park bench' lowercase__ : Optional[int] = torch.manual_seed(0 ) lowercase__ : Optional[Any] = pipe( prompt=_a , image=_a , generator=_a , num_inference_steps=5 , output_type='np' , ) lowercase__ : Any = torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 1_0**9
77
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase : Any = { "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"], "convert_funnel_original_tf_checkpoint_to_pytorch": [], "tokenization_funnel": ["FunnelTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Optional[Any] = ["FunnelTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : List[str] = [ "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST", "FunnelBaseModel", "FunnelForMaskedLM", "FunnelForMultipleChoice", "FunnelForPreTraining", "FunnelForQuestionAnswering", "FunnelForSequenceClassification", "FunnelForTokenClassification", "FunnelModel", "FunnelPreTrainedModel", "load_tf_weights_in_funnel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Union[str, Any] = [ "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST", "TFFunnelBaseModel", "TFFunnelForMaskedLM", "TFFunnelForMultipleChoice", "TFFunnelForPreTraining", "TFFunnelForQuestionAnswering", "TFFunnelForSequenceClassification", "TFFunnelForTokenClassification", "TFFunnelModel", "TFFunnelPreTrainedModel", ] if TYPE_CHECKING: from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig from .tokenization_funnel import FunnelTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_funnel_fast import FunnelTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_funnel import ( FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, FunnelBaseModel, FunnelForMaskedLM, FunnelForMultipleChoice, FunnelForPreTraining, FunnelForQuestionAnswering, FunnelForSequenceClassification, FunnelForTokenClassification, FunnelModel, FunnelPreTrainedModel, load_tf_weights_in_funnel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_funnel import ( TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, TFFunnelPreTrainedModel, ) else: import sys lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
47
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available SCREAMING_SNAKE_CASE__ : Dict = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Optional[int] = ['SpeechEncoderDecoderModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : List[Any] = ['FlaxSpeechEncoderDecoderModel'] if TYPE_CHECKING: from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel else: import sys SCREAMING_SNAKE_CASE__ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
48
import numpy as np from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor def A ( _SCREAMING_SNAKE_CASE ) -> tuple: return (data["data"], data["target"]) def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> np.ndarray: lowerCamelCase : List[str] = XGBRegressor(verbosity=0 ,random_state=42 ) xgb.fit(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # Predict target for test data lowerCamelCase : List[Any] = xgb.predict(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Tuple = predictions.reshape(len(_SCREAMING_SNAKE_CASE ) ,1 ) return predictions def A ( ) -> None: lowerCamelCase : Dict = fetch_california_housing() lowerCamelCase , lowerCamelCase : Tuple = data_handling(_SCREAMING_SNAKE_CASE ) lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[Any] = train_test_split( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,test_size=0.25 ,random_state=1 ) lowerCamelCase : Any = xgboost(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # Error printing print(f'''Mean Absolute Error : {mean_absolute_error(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )}''' ) print(f'''Mean Square Error : {mean_squared_error(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )}''' ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
48
1
import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): SCREAMING_SNAKE_CASE__ : Optional[Any] = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right SCREAMING_SNAKE_CASE__ : Tuple = 128022 SCREAMING_SNAKE_CASE__ : Optional[Any] = 128028 @require_sentencepiece class UpperCamelCase__ (lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase_ : Optional[Any] = MaMaaaTokenizer lowerCamelCase_ : List[str] = False lowerCamelCase_ : Any = False lowerCamelCase_ : List[str] = True def _lowercase ( self ) -> List[str]: super().setUp() lowerCamelCase : Union[str, Any] = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"] lowerCamelCase : str = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) ) lowerCamelCase : List[str] = Path(self.tmpdirname ) save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["spm_file"] ) lowerCamelCase : Tuple = MaMaaaTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def _lowercase ( self , **UpperCamelCase__ ) -> Union[str, Any]: return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ ) -> List[Any]: return ( "This is a test", "This is a test", ) def _lowercase ( self ) -> Dict: lowerCamelCase : str = "</s>" lowerCamelCase : int = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ ) def _lowercase ( self ) -> Optional[int]: lowerCamelCase : Tuple = self.get_tokenizer() lowerCamelCase : int = list(tokenizer.get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "</s>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "<s>" ) self.assertEqual(len(UpperCamelCase__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) ) @unittest.skip("Skip this test while all models are still to be uploaded." ) def _lowercase ( self ) -> int: pass def _lowercase ( self ) -> Dict: lowerCamelCase : List[Any] = self.get_tokenizer() lowerCamelCase : Optional[Any] = tokenizer.tokenize("This is a test" ) self.assertListEqual(UpperCamelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [2, 3, 4, 5, 6] , ) lowerCamelCase : List[Any] = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] ) self.assertListEqual(UpperCamelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) lowerCamelCase : str = tokenizer.convert_tokens_to_string(UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , "This is a test" ) @slow def _lowercase ( self ) -> Optional[int]: # fmt: off lowerCamelCase : Optional[int] = {"input_ids": [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase__ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , ) @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' lowerCamelCase_ : Optional[int] = """facebook/m2m100_418M""" lowerCamelCase_ : Optional[int] = [ """In my opinion, there are two levels of response from the French government.""", """NSA Affair Emphasizes Complete Lack of Debate on Intelligence""", ] lowerCamelCase_ : List[Any] = [ """Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""", """L'affaire NSA souligne l'absence totale de débat sur le renseignement""", ] # fmt: off lowerCamelCase_ : List[str] = [EN_CODE, 5_9_3, 1_9_4_9, 1_1_5_7_8_1, 4, 7_1_5_8_6, 4_2_3_4, 6_0_6_3_3, 1_2_6_2_3_3, 4_3_2, 1_2_3_8_0_8, 1_5_5_9_2, 1_1_9_7, 1_1_7_1_3_2, 1_2_0_6_1_8, 5, 2] @classmethod def _lowercase ( cls ) -> List[Any]: lowerCamelCase : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang="en" , tgt_lang="fr" ) lowerCamelCase : Dict = 1 return cls def _lowercase ( self ) -> Tuple: self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 12_8006 ) self.assertEqual(self.tokenizer.get_lang_id("en" ) , 12_8022 ) self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 12_8076 ) self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 12_8063 ) def _lowercase ( self ) -> Dict: lowerCamelCase : List[str] = self.tokenizer.get_vocab() self.assertEqual(len(UpperCamelCase__ ) , self.tokenizer.vocab_size ) self.assertEqual(vocab["<unk>"] , 3 ) self.assertIn(self.tokenizer.get_lang_token("en" ) , UpperCamelCase__ ) def _lowercase ( self ) -> Tuple: lowerCamelCase : Any = "en" lowerCamelCase : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , UpperCamelCase__ ) def _lowercase ( self ) -> int: self.assertIn(UpperCamelCase__ , self.tokenizer.all_special_ids ) # fmt: off lowerCamelCase : Any = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 1_4028, 136, 3286, 9706, 6, 9_0797, 6, 14_4012, 162, 8_8128, 3_0061, 5, 2] # fmt: on lowerCamelCase : List[str] = self.tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ ) lowerCamelCase : Any = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase__ ) def _lowercase ( self ) -> Optional[int]: lowerCamelCase : List[Any] = tempfile.mkdtemp() lowerCamelCase : List[Any] = self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(UpperCamelCase__ ) lowerCamelCase : Tuple = MaMaaaTokenizer.from_pretrained(UpperCamelCase__ ) self.assertDictEqual(new_tok.lang_token_to_id , UpperCamelCase__ ) @require_torch def _lowercase ( self ) -> Any: lowerCamelCase : Any = "en" lowerCamelCase : Optional[Any] = "fr" lowerCamelCase : List[Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase__ , return_tensors="pt" ) lowerCamelCase : Dict = shift_tokens_right( batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id ) for k in batch: lowerCamelCase : Any = batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def _lowercase ( self ) -> Optional[int]: lowerCamelCase : Union[str, Any] = "mr" self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) lowerCamelCase : List[str] = "zh" self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) @require_torch def _lowercase ( self ) -> Optional[int]: lowerCamelCase : List[Any] = "mr" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) lowerCamelCase : Dict = "zh" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) @require_torch def _lowercase ( self ) -> Union[str, Any]: lowerCamelCase : List[Any] = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" ) self.assertEqual( nested_simplify(UpperCamelCase__ ) , { # en_XX, A, test, EOS "input_ids": [[12_8022, 58, 4183, 2]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 12_8006, } , )
48
from math import sqrt def A ( _SCREAMING_SNAKE_CASE = 100_0000 ) -> int: lowerCamelCase : int = 0 lowerCamelCase : int = 0 lowerCamelCase : int while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 ,2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(_SCREAMING_SNAKE_CASE ,sum_shortest_sides // 2 ) - max(1 ,sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(f'''{solution() = }''')
48
1
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast @require_vision class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def _lowercase ( self ) -> Union[str, Any]: lowerCamelCase : Any = tempfile.mkdtemp() lowerCamelCase : Union[str, Any] = BlipImageProcessor() lowerCamelCase : Dict = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" ) lowerCamelCase : Tuple = BlipaProcessor(UpperCamelCase__ , UpperCamelCase__ ) processor.save_pretrained(self.tmpdirname ) def _lowercase ( self , **UpperCamelCase__ ) -> Optional[Any]: return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ).tokenizer def _lowercase ( self , **UpperCamelCase__ ) -> Optional[Any]: return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ).image_processor def _lowercase ( self ) -> Dict: shutil.rmtree(self.tmpdirname ) def _lowercase ( self ) -> str: lowerCamelCase : Tuple = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] lowerCamelCase : int = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def _lowercase ( self ) -> int: lowerCamelCase : Dict = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase : List[Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) lowerCamelCase : int = self.get_image_processor(do_normalize=UpperCamelCase__ , padding_value=1.0 ) lowerCamelCase : List[Any] = BlipaProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=UpperCamelCase__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCamelCase__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCamelCase__ ) def _lowercase ( self ) -> Dict: lowerCamelCase : Any = self.get_image_processor() lowerCamelCase : List[str] = self.get_tokenizer() lowerCamelCase : List[Any] = BlipaProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) lowerCamelCase : Dict = self.prepare_image_inputs() lowerCamelCase : Dict = image_processor(UpperCamelCase__ , return_tensors="np" ) lowerCamelCase : Dict = processor(images=UpperCamelCase__ , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _lowercase ( self ) -> str: lowerCamelCase : Tuple = self.get_image_processor() lowerCamelCase : List[Any] = self.get_tokenizer() lowerCamelCase : Tuple = BlipaProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = "lower newer" lowerCamelCase : str = processor(text=UpperCamelCase__ ) lowerCamelCase : Dict = tokenizer(UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _lowercase ( self ) -> Any: lowerCamelCase : Optional[Any] = self.get_image_processor() lowerCamelCase : Any = self.get_tokenizer() lowerCamelCase : List[Any] = BlipaProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) lowerCamelCase : Tuple = "lower newer" lowerCamelCase : List[str] = self.prepare_image_inputs() lowerCamelCase : int = processor(text=UpperCamelCase__ , images=UpperCamelCase__ ) self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase__ ): processor() def _lowercase ( self ) -> List[str]: lowerCamelCase : Union[str, Any] = self.get_image_processor() lowerCamelCase : Union[str, Any] = self.get_tokenizer() lowerCamelCase : Optional[Any] = BlipaProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) lowerCamelCase : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCamelCase : List[str] = processor.batch_decode(UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = tokenizer.batch_decode(UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self ) -> List[Any]: lowerCamelCase : Optional[int] = self.get_image_processor() lowerCamelCase : Optional[Any] = self.get_tokenizer() lowerCamelCase : Tuple = BlipaProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) lowerCamelCase : str = "lower newer" lowerCamelCase : str = self.prepare_image_inputs() lowerCamelCase : Dict = processor(text=UpperCamelCase__ , images=UpperCamelCase__ ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
48
import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors SCREAMING_SNAKE_CASE__ : Dict = logging.getLogger(__name__) class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Optional[int] = """sequence-classification""" def __init__( self , UpperCamelCase__ ) -> List[Any]: if type(UpperCamelCase__ ) == dict: lowerCamelCase : int = Namespace(**UpperCamelCase__ ) lowerCamelCase : str = glue_output_modes[hparams.task] lowerCamelCase : int = glue_tasks_num_labels[hparams.task] super().__init__(UpperCamelCase__ , UpperCamelCase__ , self.mode ) def _lowercase ( self , **UpperCamelCase__ ) -> Tuple: return self.model(**UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple: lowerCamelCase : Union[str, Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: lowerCamelCase : List[str] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None lowerCamelCase : Optional[int] = self(**UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = outputs[0] lowerCamelCase : str = self.trainer.lr_schedulers[0]["scheduler"] lowerCamelCase : Optional[int] = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def _lowercase ( self ) -> str: lowerCamelCase : Any = self.hparams lowerCamelCase : Union[str, Any] = processors[args.task]() lowerCamelCase : Optional[int] = processor.get_labels() for mode in ["train", "dev"]: lowerCamelCase : Optional[Any] = self._feature_file(UpperCamelCase__ ) if os.path.exists(UpperCamelCase__ ) and not args.overwrite_cache: logger.info("Loading features from cached file %s" , UpperCamelCase__ ) else: logger.info("Creating features from dataset file at %s" , args.data_dir ) lowerCamelCase : List[str] = ( processor.get_dev_examples(args.data_dir ) if mode == "dev" else processor.get_train_examples(args.data_dir ) ) lowerCamelCase : Dict = convert_examples_to_features( UpperCamelCase__ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info("Saving features into cached file %s" , UpperCamelCase__ ) torch.save(UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False ) -> DataLoader: lowerCamelCase : str = "dev" if mode == "test" else mode lowerCamelCase : int = self._feature_file(UpperCamelCase__ ) logger.info("Loading features from cached file %s" , UpperCamelCase__ ) lowerCamelCase : str = torch.load(UpperCamelCase__ ) lowerCamelCase : List[str] = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) lowerCamelCase : str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) lowerCamelCase : List[str] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) if self.hparams.glue_output_mode == "classification": lowerCamelCase : Any = torch.tensor([f.label for f in features] , dtype=torch.long ) elif self.hparams.glue_output_mode == "regression": lowerCamelCase : Union[str, Any] = torch.tensor([f.label for f in features] , dtype=torch.float ) return DataLoader( TensorDataset(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , batch_size=UpperCamelCase__ , shuffle=UpperCamelCase__ , ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]: lowerCamelCase : Dict = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: lowerCamelCase : Tuple = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None lowerCamelCase : Dict = self(**UpperCamelCase__ ) lowerCamelCase , lowerCamelCase : Any = outputs[:2] lowerCamelCase : Union[str, Any] = logits.detach().cpu().numpy() lowerCamelCase : Optional[Any] = inputs["labels"].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def _lowercase ( self , UpperCamelCase__ ) -> tuple: lowerCamelCase : Union[str, Any] = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item() lowerCamelCase : Optional[int] = np.concatenate([x["pred"] for x in outputs] , axis=0 ) if self.hparams.glue_output_mode == "classification": lowerCamelCase : Union[str, Any] = np.argmax(UpperCamelCase__ , axis=1 ) elif self.hparams.glue_output_mode == "regression": lowerCamelCase : str = np.squeeze(UpperCamelCase__ ) lowerCamelCase : List[Any] = np.concatenate([x["target"] for x in outputs] , axis=0 ) lowerCamelCase : List[str] = [[] for _ in range(out_label_ids.shape[0] )] lowerCamelCase : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )] lowerCamelCase : Dict = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , UpperCamelCase__ , UpperCamelCase__ )} lowerCamelCase : List[str] = dict(results.items() ) lowerCamelCase : Optional[int] = results return ret, preds_list, out_label_list def _lowercase ( self , UpperCamelCase__ ) -> dict: lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = self._eval_end(UpperCamelCase__ ) lowerCamelCase : str = ret["log"] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def _lowercase ( self , UpperCamelCase__ ) -> dict: lowerCamelCase , lowerCamelCase , lowerCamelCase : str = self._eval_end(UpperCamelCase__ ) lowerCamelCase : str = ret["log"] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def _lowercase ( UpperCamelCase__ , UpperCamelCase__ ) -> int: BaseTransformer.add_model_specific_args(UpperCamelCase__ , UpperCamelCase__ ) parser.add_argument( "--max_seq_length" , default=128 , type=UpperCamelCase__ , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) , ) parser.add_argument( "--task" , default="" , type=UpperCamelCase__ , required=UpperCamelCase__ , help="The GLUE task to run" , ) parser.add_argument( "--gpus" , default=0 , type=UpperCamelCase__ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , ) parser.add_argument( "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" ) return parser def A ( ) -> int: lowerCamelCase : int = argparse.ArgumentParser() add_generic_args(_SCREAMING_SNAKE_CASE ,os.getcwd() ) lowerCamelCase : str = GLUETransformer.add_model_specific_args(_SCREAMING_SNAKE_CASE ,os.getcwd() ) lowerCamelCase : str = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: lowerCamelCase : int = os.path.join( "./results" ,f'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' ,) os.makedirs(args.output_dir ) lowerCamelCase : int = GLUETransformer(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Dict = generic_train(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # Optionally, predict on dev set and write to output_dir if args.do_predict: lowerCamelCase : Optional[int] = sorted(glob.glob(os.path.join(args.output_dir ,"checkpoint-epoch=*.ckpt" ) ,recursive=_SCREAMING_SNAKE_CASE ) ) lowerCamelCase : Tuple = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
48
1
from ..utils import DummyObject, requires_backends class UpperCamelCase__ (metaclass=lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Tuple = ["""onnx"""] def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple: requires_backends(self , ["onnx"] ) @classmethod def _lowercase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]: requires_backends(cls , ["onnx"] ) @classmethod def _lowercase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Dict: requires_backends(cls , ["onnx"] )
48
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Any: # "extended trapezoidal rule" # int(f) = dx/2 * (f1 + 2f2 + ... + fn) lowerCamelCase : str = (boundary[1] - boundary[0]) / steps lowerCamelCase : List[str] = boundary[0] lowerCamelCase : Union[str, Any] = boundary[1] lowerCamelCase : int = make_points(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) lowerCamelCase : List[str] = 0.0 y += (h / 2.0) * f(_SCREAMING_SNAKE_CASE ) for i in x_i: # print(i) y += h * f(_SCREAMING_SNAKE_CASE ) y += (h / 2.0) * f(_SCREAMING_SNAKE_CASE ) return y def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> int: lowerCamelCase : int = a + h while x < (b - h): yield x lowerCamelCase : List[str] = x + h def A ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]: # enter your function here lowerCamelCase : str = (x - 0) * (x - 0) return y def A ( ) -> int: lowerCamelCase : int = 0.0 # Lower bound of integration lowerCamelCase : int = 1.0 # Upper bound of integration lowerCamelCase : Dict = 10.0 # define number of steps or resolution lowerCamelCase : int = [a, b] # define boundary of integration lowerCamelCase : str = method_a(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) print(f'''y = {y}''' ) if __name__ == "__main__": main()
48
1
import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / 'utils')) from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_tests_dir('fixtures') class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def _lowercase ( self ) -> List[str]: # A mock response for an HTTP head request to emulate server down lowerCamelCase : Optional[Any] = mock.Mock() lowerCamelCase : Union[str, Any] = 500 lowerCamelCase : str = {} lowerCamelCase : Any = HTTPError lowerCamelCase : List[Any] = {} # Download this model to make sure it's in the cache. lowerCamelCase : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request" , return_value=UpperCamelCase__ ) as mock_head: lowerCamelCase : List[Any] = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" ) # This check we did call the fake head request mock_head.assert_called() def _lowercase ( self ) -> Union[str, Any]: # This test is for deprecated behavior and can be removed in v5 lowerCamelCase : str = WavaVecaFeatureExtractor.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" ) @is_staging_test class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @classmethod def _lowercase ( cls ) -> Optional[Any]: lowerCamelCase : Any = TOKEN HfFolder.save_token(UpperCamelCase__ ) @classmethod def _lowercase ( cls ) -> Tuple: try: delete_repo(token=cls._token , repo_id="test-feature-extractor" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" ) except HTTPError: pass def _lowercase ( self ) -> Union[str, Any]: lowerCamelCase : Any = WavaVecaFeatureExtractor.from_pretrained(UpperCamelCase__ ) feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token ) lowerCamelCase : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(UpperCamelCase__ , getattr(UpperCamelCase__ , UpperCamelCase__ ) ) # Reset repo delete_repo(token=self._token , repo_id="test-feature-extractor" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( UpperCamelCase__ , repo_id="test-feature-extractor" , push_to_hub=UpperCamelCase__ , use_auth_token=self._token ) lowerCamelCase : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(UpperCamelCase__ , getattr(UpperCamelCase__ , UpperCamelCase__ ) ) def _lowercase ( self ) -> Optional[int]: lowerCamelCase : str = WavaVecaFeatureExtractor.from_pretrained(UpperCamelCase__ ) feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token ) lowerCamelCase : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(UpperCamelCase__ , getattr(UpperCamelCase__ , UpperCamelCase__ ) ) # Reset repo delete_repo(token=self._token , repo_id="valid_org/test-feature-extractor" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( UpperCamelCase__ , repo_id="valid_org/test-feature-extractor-org" , push_to_hub=UpperCamelCase__ , use_auth_token=self._token ) lowerCamelCase : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(UpperCamelCase__ , getattr(UpperCamelCase__ , UpperCamelCase__ ) ) def _lowercase ( self ) -> Dict: CustomFeatureExtractor.register_for_auto_class() lowerCamelCase : List[str] = CustomFeatureExtractor.from_pretrained(UpperCamelCase__ ) feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , ) lowerCamelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained( F'''{USER}/test-dynamic-feature-extractor''' , trust_remote_code=UpperCamelCase__ ) # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor" )
48
def A ( _SCREAMING_SNAKE_CASE = 100_0000 ) -> int: lowerCamelCase : Tuple = 1 lowerCamelCase : int = 1 lowerCamelCase : Optional[Any] = {1: 1} for inputa in range(2 ,_SCREAMING_SNAKE_CASE ): lowerCamelCase : Union[str, Any] = 0 lowerCamelCase : List[str] = inputa while True: if number in counters: counter += counters[number] break if number % 2 == 0: number //= 2 counter += 1 else: lowerCamelCase : str = (3 * number) + 1 counter += 1 if inputa not in counters: lowerCamelCase : str = counter if counter > pre_counter: lowerCamelCase : str = inputa lowerCamelCase : Any = counter return largest_number if __name__ == "__main__": print(solution(int(input().strip())))
48
1
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_lxmert import LxmertTokenizer SCREAMING_SNAKE_CASE__ : str = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} SCREAMING_SNAKE_CASE__ : Any = { 'vocab_file': { 'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt', }, 'tokenizer_file': { 'unc-nlp/lxmert-base-uncased': ( 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json' ), }, } SCREAMING_SNAKE_CASE__ : Dict = { 'unc-nlp/lxmert-base-uncased': 512, } SCREAMING_SNAKE_CASE__ : Tuple = { 'unc-nlp/lxmert-base-uncased': {'do_lower_case': True}, } class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Optional[Any] = VOCAB_FILES_NAMES lowerCamelCase_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase_ : Optional[int] = PRETRAINED_INIT_CONFIGURATION lowerCamelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase_ : Optional[Any] = LxmertTokenizer def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=True , UpperCamelCase__="[UNK]" , UpperCamelCase__="[SEP]" , UpperCamelCase__="[PAD]" , UpperCamelCase__="[CLS]" , UpperCamelCase__="[MASK]" , UpperCamelCase__=True , UpperCamelCase__=None , **UpperCamelCase__ , ) -> List[str]: super().__init__( UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , do_lower_case=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , tokenize_chinese_chars=UpperCamelCase__ , strip_accents=UpperCamelCase__ , **UpperCamelCase__ , ) lowerCamelCase : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , UpperCamelCase__ ) != do_lower_case or normalizer_state.get("strip_accents" , UpperCamelCase__ ) != strip_accents or normalizer_state.get("handle_chinese_chars" , UpperCamelCase__ ) != tokenize_chinese_chars ): lowerCamelCase : Optional[int] = getattr(UpperCamelCase__ , normalizer_state.pop("type" ) ) lowerCamelCase : Optional[int] = do_lower_case lowerCamelCase : int = strip_accents lowerCamelCase : Union[str, Any] = tokenize_chinese_chars lowerCamelCase : Any = normalizer_class(**UpperCamelCase__ ) lowerCamelCase : Tuple = do_lower_case def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__=None ) -> Any: lowerCamelCase : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]: lowerCamelCase : List[str] = [self.sep_token_id] lowerCamelCase : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]: lowerCamelCase : str = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ ) return tuple(UpperCamelCase__ )
48
import argparse import os import re SCREAMING_SNAKE_CASE__ : List[Any] = 'src/transformers/models/auto' # re pattern that matches mapping introductions: # SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict SCREAMING_SNAKE_CASE__ : Optional[int] = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict') # re pattern that matches identifiers in mappings SCREAMING_SNAKE_CASE__ : Tuple = re.compile(r'\s*\(\s*"(\S[^"]+)"') def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = False ) -> int: with open(_SCREAMING_SNAKE_CASE ,"r" ,encoding="utf-8" ) as f: lowerCamelCase : List[Any] = f.read() lowerCamelCase : str = content.split("\n" ) lowerCamelCase : int = [] lowerCamelCase : List[Any] = 0 while line_idx < len(_SCREAMING_SNAKE_CASE ): if _re_intro_mapping.search(lines[line_idx] ) is not None: lowerCamelCase : Optional[int] = len(re.search(r"^(\s*)\S" ,lines[line_idx] ).groups()[0] ) + 8 # Start of a new mapping! while not lines[line_idx].startswith(" " * indent + "(" ): new_lines.append(lines[line_idx] ) line_idx += 1 lowerCamelCase : Optional[int] = [] while lines[line_idx].strip() != "]": # Blocks either fit in one line or not if lines[line_idx].strip() == "(": lowerCamelCase : List[str] = line_idx while not lines[line_idx].startswith(" " * indent + ")" ): line_idx += 1 blocks.append("\n".join(lines[start_idx : line_idx + 1] ) ) else: blocks.append(lines[line_idx] ) line_idx += 1 # Sort blocks by their identifiers lowerCamelCase : Union[str, Any] = sorted(_SCREAMING_SNAKE_CASE ,key=lambda _SCREAMING_SNAKE_CASE : _re_identifier.search(_SCREAMING_SNAKE_CASE ).groups()[0] ) new_lines += blocks else: new_lines.append(lines[line_idx] ) line_idx += 1 if overwrite: with open(_SCREAMING_SNAKE_CASE ,"w" ,encoding="utf-8" ) as f: f.write("\n".join(_SCREAMING_SNAKE_CASE ) ) elif "\n".join(_SCREAMING_SNAKE_CASE ) != content: return True def A ( _SCREAMING_SNAKE_CASE = False ) -> List[str]: lowerCamelCase : str = [os.path.join(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) for f in os.listdir(_SCREAMING_SNAKE_CASE ) if f.endswith(".py" )] lowerCamelCase : Union[str, Any] = [sort_auto_mapping(_SCREAMING_SNAKE_CASE ,overwrite=_SCREAMING_SNAKE_CASE ) for fname in fnames] if not overwrite and any(_SCREAMING_SNAKE_CASE ): lowerCamelCase : str = [f for f, d in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) if d] raise ValueError( f'''The following files have auto mappings that need sorting: {", ".join(_SCREAMING_SNAKE_CASE )}. Run `make style` to fix''' " this." ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : List[str] = argparse.ArgumentParser() parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.') SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args() sort_all_auto_mappings(not args.check_only)
48
1
import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand SCREAMING_SNAKE_CASE__ : Union[str, Any] = ( '4S 3H 2C 7S 5H', '9D 8H 2C 6S 7H', '2D 6D 9D TH 7D', 'TC 8C 2S JH 6C', 'JH 8S TH AH QH', 'TS KS 5S 9S AC', 'KD 6S 9D TH AD', 'KS 8D 4D 9S 4S', # pair '8C 4S KH JS 4D', # pair 'QH 8H KD JH 8S', # pair 'KC 4H KS 2H 8D', # pair 'KD 4S KC 3H 8S', # pair 'AH 8S AS KC JH', # pair '3H 4C 4H 3S 2H', # 2 pairs '5S 5D 2C KH KH', # 2 pairs '3C KH 5D 5S KH', # 2 pairs 'AS 3C KH AD KH', # 2 pairs '7C 7S 3S 7H 5S', # 3 of a kind '7C 7S KH 2H 7H', # 3 of a kind 'AC KH QH AH AS', # 3 of a kind '2H 4D 3C AS 5S', # straight (low ace) '3C 5C 4C 2C 6H', # straight '6S 8S 7S 5H 9H', # straight 'JS QS 9H TS KH', # straight 'QC KH TS JS AH', # straight (high ace) '8C 9C 5C 3C TC', # flush '3S 8S 9S 5S KS', # flush '4C 5C 9C 8C KC', # flush 'JH 8H AH KH QH', # flush '3D 2H 3H 2C 2D', # full house '2H 2C 3S 3H 3D', # full house 'KH KC 3S 3H 3D', # full house 'JC 6H JS JD JH', # 4 of a kind 'JC 7H JS JD JH', # 4 of a kind 'JC KH JS JD JH', # 4 of a kind '2S AS 4S 5S 3S', # straight flush (low ace) '2D 6D 3D 4D 5D', # straight flush '5C 6C 3C 7C 4C', # straight flush 'JH 9H TH KH QH', # straight flush 'JH AH TH KH QH', # royal flush (high ace straight flush) ) SCREAMING_SNAKE_CASE__ : List[Any] = ( ('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'), ('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'), ('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'), ('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'), ('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'), ('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'), ('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'), ('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'), ('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'), ('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'), ('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'), ('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'), ('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'), ('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'), ('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'), ('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'), ('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'), ('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'), ('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'), ('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'), ('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'), ('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'), ('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'), ('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'), ('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'), ('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'), ('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'), ('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'), ('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'), ('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'), ('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'), ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', True), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', False), ('AS 3S 4S 8S 2S', True), ) SCREAMING_SNAKE_CASE__ : Optional[int] = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', False), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', True), ) SCREAMING_SNAKE_CASE__ : Optional[int] = ( ('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]), ('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]), ('JH QD KC AS TS', False, [14, 13, 12, 11, 10]), ('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]), ) SCREAMING_SNAKE_CASE__ : Dict = ( ('JH AH TH KH QH', 0), ('JH 9H TH KH QH', 0), ('JC KH JS JD JH', 7), ('KH KC 3S 3H 3D', 6), ('8C 9C 5C 3C TC', 0), ('JS QS 9H TS KH', 0), ('7C 7S KH 2H 7H', 3), ('3C KH 5D 5S KH', 2), ('QH 8H KD JH 8S', 1), ('2D 6D 9D TH 7D', 0), ) SCREAMING_SNAKE_CASE__ : Optional[Any] = ( ('JH AH TH KH QH', 23), ('JH 9H TH KH QH', 22), ('JC KH JS JD JH', 21), ('KH KC 3S 3H 3D', 20), ('8C 9C 5C 3C TC', 19), ('JS QS 9H TS KH', 18), ('7C 7S KH 2H 7H', 17), ('3C KH 5D 5S KH', 16), ('QH 8H KD JH 8S', 15), ('2D 6D 9D TH 7D', 14), ) def A ( ) -> List[Any]: lowerCamelCase , lowerCamelCase : Tuple = randrange(len(_SCREAMING_SNAKE_CASE ) ), randrange(len(_SCREAMING_SNAKE_CASE ) ) lowerCamelCase : Optional[Any] = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)] lowerCamelCase , lowerCamelCase : Any = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def A ( _SCREAMING_SNAKE_CASE = 100 ) -> Tuple: return (generate_random_hand() for _ in range(_SCREAMING_SNAKE_CASE )) @pytest.mark.parametrize("hand, expected" ,_SCREAMING_SNAKE_CASE ) def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> List[str]: assert PokerHand(_SCREAMING_SNAKE_CASE )._is_flush() == expected @pytest.mark.parametrize("hand, expected" ,_SCREAMING_SNAKE_CASE ) def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> List[str]: assert PokerHand(_SCREAMING_SNAKE_CASE )._is_straight() == expected @pytest.mark.parametrize("hand, expected, card_values" ,_SCREAMING_SNAKE_CASE ) def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]: lowerCamelCase : str = PokerHand(_SCREAMING_SNAKE_CASE ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize("hand, expected" ,_SCREAMING_SNAKE_CASE ) def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Any: assert PokerHand(_SCREAMING_SNAKE_CASE )._is_same_kind() == expected @pytest.mark.parametrize("hand, expected" ,_SCREAMING_SNAKE_CASE ) def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]: assert PokerHand(_SCREAMING_SNAKE_CASE )._hand_type == expected @pytest.mark.parametrize("hand, other, expected" ,_SCREAMING_SNAKE_CASE ) def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]: assert PokerHand(_SCREAMING_SNAKE_CASE ).compare_with(PokerHand(_SCREAMING_SNAKE_CASE ) ) == expected @pytest.mark.parametrize("hand, other, expected" ,generate_random_hands() ) def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> int: assert PokerHand(_SCREAMING_SNAKE_CASE ).compare_with(PokerHand(_SCREAMING_SNAKE_CASE ) ) == expected def A ( ) -> Tuple: lowerCamelCase : Union[str, Any] = [PokerHand(_SCREAMING_SNAKE_CASE ) for hand in SORTED_HANDS] lowerCamelCase : int = poker_hands.copy() shuffle(_SCREAMING_SNAKE_CASE ) lowerCamelCase : str = chain(sorted(_SCREAMING_SNAKE_CASE ) ) for index, hand in enumerate(_SCREAMING_SNAKE_CASE ): assert hand == poker_hands[index] def A ( ) -> List[Any]: # Test that five high straights are compared correctly. lowerCamelCase : List[str] = [PokerHand("2D AC 3H 4H 5S" ), PokerHand("2S 3H 4H 5S 6C" )] pokerhands.sort(reverse=_SCREAMING_SNAKE_CASE ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def A ( ) -> Optional[int]: # Multiple calls to five_high_straight function should still return True # and shouldn't mutate the list in every call other than the first. lowerCamelCase : Any = PokerHand("2C 4S AS 3D 5C" ) lowerCamelCase : Optional[int] = True lowerCamelCase : List[str] = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def A ( ) -> Union[str, Any]: # Problem number 54 from Project Euler # Testing from poker_hands.txt file lowerCamelCase : Any = 0 lowerCamelCase : str = os.path.abspath(os.path.dirname(_SCREAMING_SNAKE_CASE ) ) lowerCamelCase : Optional[Any] = os.path.join(_SCREAMING_SNAKE_CASE ,"poker_hands.txt" ) with open(_SCREAMING_SNAKE_CASE ) as file_hand: for line in file_hand: lowerCamelCase : Tuple = line[:14].strip() lowerCamelCase : Any = line[15:].strip() lowerCamelCase , lowerCamelCase : int = PokerHand(_SCREAMING_SNAKE_CASE ), PokerHand(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Tuple = player.compare_with(_SCREAMING_SNAKE_CASE ) if output == "Win": answer += 1 assert answer == 376
48
def A ( _SCREAMING_SNAKE_CASE ) -> list: if n_term == "": return [] lowerCamelCase : list = [] for temp in range(int(_SCREAMING_SNAKE_CASE ) ): series.append(f'''1/{temp + 1}''' if series else "1" ) return series if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Dict = input('Enter the last number (nth term) of the Harmonic Series') print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n') print(harmonic_series(nth_term))
48
1
from graphs.minimum_spanning_tree_kruskal import kruskal def A ( ) -> List[Any]: lowerCamelCase : Optional[int] = 9 lowerCamelCase : List[str] = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] lowerCamelCase : Tuple = kruskal(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) lowerCamelCase : Optional[Any] = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] assert sorted(_SCREAMING_SNAKE_CASE ) == sorted(_SCREAMING_SNAKE_CASE )
48
from __future__ import annotations import requests def A ( _SCREAMING_SNAKE_CASE ) -> dict: lowerCamelCase : Tuple = f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty''' return requests.get(_SCREAMING_SNAKE_CASE ).json() def A ( _SCREAMING_SNAKE_CASE = 10 ) -> list[dict]: lowerCamelCase : str = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty" lowerCamelCase : Any = requests.get(_SCREAMING_SNAKE_CASE ).json()[:max_stories] return [get_hackernews_story(_SCREAMING_SNAKE_CASE ) for story_id in story_ids] def A ( _SCREAMING_SNAKE_CASE = 10 ) -> str: lowerCamelCase : str = hackernews_top_stories(_SCREAMING_SNAKE_CASE ) return "\n".join("* [{title}]({url})".format(**_SCREAMING_SNAKE_CASE ) for story in stories ) if __name__ == "__main__": print(hackernews_top_stories_as_markdown())
48
1
# Imports import numpy as np class UpperCamelCase__ : '''simple docstring''' def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None ) -> List[Any]: self.set_matricies(red=UpperCamelCase__ , green=UpperCamelCase__ , blue=UpperCamelCase__ , red_edge=UpperCamelCase__ , nir=UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None ) -> Optional[int]: if red is not None: lowerCamelCase : Any = red if green is not None: lowerCamelCase : List[str] = green if blue is not None: lowerCamelCase : str = blue if red_edge is not None: lowerCamelCase : Tuple = red_edge if nir is not None: lowerCamelCase : List[str] = nir return True def _lowercase ( self , UpperCamelCase__="" , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None ) -> List[str]: self.set_matricies(red=UpperCamelCase__ , green=UpperCamelCase__ , blue=UpperCamelCase__ , red_edge=UpperCamelCase__ , nir=UpperCamelCase__ ) lowerCamelCase : str = { "ARVI2": self.arvaa, "CCCI": self.ccci, "CVI": self.cvi, "GLI": self.gli, "NDVI": self.ndvi, "BNDVI": self.bndvi, "redEdgeNDVI": self.red_edge_ndvi, "GNDVI": self.gndvi, "GBNDVI": self.gbndvi, "GRNDVI": self.grndvi, "RBNDVI": self.rbndvi, "PNDVI": self.pndvi, "ATSAVI": self.atsavi, "BWDRVI": self.bwdrvi, "CIgreen": self.ci_green, "CIrededge": self.ci_rededge, "CI": self.ci, "CTVI": self.ctvi, "GDVI": self.gdvi, "EVI": self.evi, "GEMI": self.gemi, "GOSAVI": self.gosavi, "GSAVI": self.gsavi, "Hue": self.hue, "IVI": self.ivi, "IPVI": self.ipvi, "I": self.i, "RVI": self.rvi, "MRVI": self.mrvi, "MSAVI": self.m_savi, "NormG": self.norm_g, "NormNIR": self.norm_nir, "NormR": self.norm_r, "NGRDI": self.ngrdi, "RI": self.ri, "S": self.s, "IF": self._if, "DVI": self.dvi, "TVI": self.tvi, "NDRE": self.ndre, } try: return funcs[index]() except KeyError: print("Index not in the list!" ) return False def _lowercase ( self ) -> Optional[Any]: return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red))) def _lowercase ( self ) -> int: return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / ( (self.nir - self.red) / (self.nir + self.red) ) def _lowercase ( self ) -> Optional[Any]: return self.nir * (self.red / (self.green**2)) def _lowercase ( self ) -> Any: return (2 * self.green - self.red - self.blue) / ( 2 * self.green + self.red + self.blue ) def _lowercase ( self ) -> List[Any]: return (self.nir - self.red) / (self.nir + self.red) def _lowercase ( self ) -> Any: return (self.nir - self.blue) / (self.nir + self.blue) def _lowercase ( self ) -> Any: return (self.redEdge - self.red) / (self.redEdge + self.red) def _lowercase ( self ) -> List[Any]: return (self.nir - self.green) / (self.nir + self.green) def _lowercase ( self ) -> Optional[Any]: return (self.nir - (self.green + self.blue)) / ( self.nir + (self.green + self.blue) ) def _lowercase ( self ) -> Dict: return (self.nir - (self.green + self.red)) / ( self.nir + (self.green + self.red) ) def _lowercase ( self ) -> str: return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red)) def _lowercase ( self ) -> List[str]: return (self.nir - (self.green + self.red + self.blue)) / ( self.nir + (self.green + self.red + self.blue) ) def _lowercase ( self , UpperCamelCase__=0.08 , UpperCamelCase__=1.22 , UpperCamelCase__=0.03 ) -> int: return a * ( (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2)) ) def _lowercase ( self ) -> str: return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue) def _lowercase ( self ) -> Dict: return (self.nir / self.green) - 1 def _lowercase ( self ) -> List[Any]: return (self.nir / self.redEdge) - 1 def _lowercase ( self ) -> Optional[int]: return (self.red - self.blue) / self.red def _lowercase ( self ) -> Any: lowerCamelCase : Optional[Any] = self.ndvi() return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2)) def _lowercase ( self ) -> Optional[int]: return self.nir - self.green def _lowercase ( self ) -> Tuple: return 2.5 * ( (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1) ) def _lowercase ( self ) -> Union[str, Any]: lowerCamelCase : Any = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / ( self.nir + self.red + 0.5 ) return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red) def _lowercase ( self , UpperCamelCase__=0.16 ) -> Any: return (self.nir - self.green) / (self.nir + self.green + y) def _lowercase ( self , UpperCamelCase__=0.5 ) -> Tuple: return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n) def _lowercase ( self ) -> List[Any]: return np.arctan( ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) ) def _lowercase ( self , UpperCamelCase__=None , UpperCamelCase__=None ) -> int: return (self.nir - b) / (a * self.red) def _lowercase ( self ) -> Dict: return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1) def _lowercase ( self ) -> Union[str, Any]: return (self.red + self.green + self.blue) / 30.5 def _lowercase ( self ) -> int: return self.nir / self.red def _lowercase ( self ) -> List[Any]: return (self.rvi() - 1) / (self.rvi() + 1) def _lowercase ( self ) -> Optional[Any]: return ( (2 * self.nir + 1) - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2) ) / 2 def _lowercase ( self ) -> List[Any]: return self.green / (self.nir + self.red + self.green) def _lowercase ( self ) -> int: return self.nir / (self.nir + self.red + self.green) def _lowercase ( self ) -> Tuple: return self.red / (self.nir + self.red + self.green) def _lowercase ( self ) -> Optional[int]: return (self.green - self.red) / (self.green + self.red) def _lowercase ( self ) -> Any: return (self.red - self.green) / (self.red + self.green) def _lowercase ( self ) -> List[str]: lowerCamelCase : Union[str, Any] = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] ) lowerCamelCase : Union[str, Any] = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] ) return (max_value - min_value) / max_value def _lowercase ( self ) -> int: return (2 * self.red - self.green - self.blue) / (self.green - self.blue) def _lowercase ( self ) -> Optional[int]: return self.nir / self.red def _lowercase ( self ) -> Optional[Any]: return (self.ndvi() + 0.5) ** (1 / 2) def _lowercase ( self ) -> Dict: return (self.nir - self.redEdge) / (self.nir + self.redEdge)
48
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Dict = { 'salesforce/blip2-opt-2.7b': 'https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json', } class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Union[str, Any] = """blip_2_vision_model""" def __init__( self , UpperCamelCase__=1408 , UpperCamelCase__=6144 , UpperCamelCase__=39 , UpperCamelCase__=16 , UpperCamelCase__=224 , UpperCamelCase__=14 , UpperCamelCase__="gelu" , UpperCamelCase__=0.00001 , UpperCamelCase__=0.0 , UpperCamelCase__=1e-10 , UpperCamelCase__=True , **UpperCamelCase__ , ) -> Optional[Any]: super().__init__(**UpperCamelCase__ ) lowerCamelCase : Dict = hidden_size lowerCamelCase : Union[str, Any] = intermediate_size lowerCamelCase : List[str] = num_hidden_layers lowerCamelCase : List[str] = num_attention_heads lowerCamelCase : Dict = patch_size lowerCamelCase : Tuple = image_size lowerCamelCase : Dict = initializer_range lowerCamelCase : Union[str, Any] = attention_dropout lowerCamelCase : Dict = layer_norm_eps lowerCamelCase : Optional[Any] = hidden_act lowerCamelCase : str = qkv_bias @classmethod def _lowercase ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> "PretrainedConfig": cls._set_token_in_kwargs(UpperCamelCase__ ) lowerCamelCase , lowerCamelCase : List[str] = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ ) # get the vision config dict if we are loading from Blip2Config if config_dict.get("model_type" ) == "blip-2": lowerCamelCase : Optional[int] = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ ) class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Dict = """blip_2_qformer""" def __init__( self , UpperCamelCase__=3_0522 , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-12 , UpperCamelCase__=0 , UpperCamelCase__="absolute" , UpperCamelCase__=2 , UpperCamelCase__=1408 , **UpperCamelCase__ , ) -> int: super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ ) lowerCamelCase : Optional[int] = vocab_size lowerCamelCase : int = hidden_size lowerCamelCase : Dict = num_hidden_layers lowerCamelCase : Union[str, Any] = num_attention_heads lowerCamelCase : int = hidden_act lowerCamelCase : Optional[Any] = intermediate_size lowerCamelCase : Dict = hidden_dropout_prob lowerCamelCase : Dict = attention_probs_dropout_prob lowerCamelCase : Dict = max_position_embeddings lowerCamelCase : List[str] = initializer_range lowerCamelCase : List[str] = layer_norm_eps lowerCamelCase : int = position_embedding_type lowerCamelCase : Tuple = cross_attention_frequency lowerCamelCase : Optional[int] = encoder_hidden_size @classmethod def _lowercase ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> "PretrainedConfig": cls._set_token_in_kwargs(UpperCamelCase__ ) lowerCamelCase , lowerCamelCase : str = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ ) # get the qformer config dict if we are loading from Blip2Config if config_dict.get("model_type" ) == "blip-2": lowerCamelCase : int = config_dict["qformer_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ ) class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : List[str] = """blip-2""" lowerCamelCase_ : int = True def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=32 , **UpperCamelCase__ ) -> str: super().__init__(**UpperCamelCase__ ) if vision_config is None: lowerCamelCase : List[Any] = {} logger.info("vision_config is None. initializing the Blip2VisionConfig with default values." ) if qformer_config is None: lowerCamelCase : List[Any] = {} logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values." ) if text_config is None: lowerCamelCase : Any = {} logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." ) lowerCamelCase : Optional[int] = BlipaVisionConfig(**UpperCamelCase__ ) lowerCamelCase : str = BlipaQFormerConfig(**UpperCamelCase__ ) lowerCamelCase : List[str] = text_config["model_type"] if "model_type" in text_config else "opt" lowerCamelCase : str = CONFIG_MAPPING[text_model_type](**UpperCamelCase__ ) lowerCamelCase : Optional[Any] = self.text_config.tie_word_embeddings lowerCamelCase : int = self.text_config.is_encoder_decoder lowerCamelCase : Optional[Any] = num_query_tokens lowerCamelCase : int = self.vision_config.hidden_size lowerCamelCase : Tuple = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES lowerCamelCase : Dict = 1.0 lowerCamelCase : List[Any] = 0.02 @classmethod def _lowercase ( cls , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ , ) -> str: return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **UpperCamelCase__ , ) def _lowercase ( self ) -> Optional[Any]: lowerCamelCase : Tuple = copy.deepcopy(self.__dict__ ) lowerCamelCase : Tuple = self.vision_config.to_dict() lowerCamelCase : int = self.qformer_config.to_dict() lowerCamelCase : Optional[Any] = self.text_config.to_dict() lowerCamelCase : int = self.__class__.model_type return output
48
1
import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class UpperCamelCase__ : '''simple docstring''' def __init__( self , UpperCamelCase__ , UpperCamelCase__=100 , UpperCamelCase__=13 , UpperCamelCase__=30 , UpperCamelCase__=2 , UpperCamelCase__=3 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=32 , UpperCamelCase__=4 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=10 , UpperCamelCase__=0.02 , UpperCamelCase__=3 , UpperCamelCase__=None , UpperCamelCase__=[0, 1, 2, 3] , ) -> List[Any]: lowerCamelCase : Optional[Any] = parent lowerCamelCase : str = 100 lowerCamelCase : int = batch_size lowerCamelCase : Tuple = image_size lowerCamelCase : List[str] = patch_size lowerCamelCase : Dict = num_channels lowerCamelCase : Optional[int] = is_training lowerCamelCase : int = use_labels lowerCamelCase : Optional[int] = hidden_size lowerCamelCase : Optional[int] = num_hidden_layers lowerCamelCase : List[Any] = num_attention_heads lowerCamelCase : Any = intermediate_size lowerCamelCase : List[Any] = hidden_act lowerCamelCase : str = hidden_dropout_prob lowerCamelCase : Tuple = attention_probs_dropout_prob lowerCamelCase : Dict = type_sequence_label_size lowerCamelCase : int = initializer_range lowerCamelCase : Optional[int] = scope lowerCamelCase : Tuple = out_indices lowerCamelCase : Dict = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) lowerCamelCase : List[str] = (image_size // patch_size) ** 2 lowerCamelCase : Optional[Any] = num_patches + 1 def _lowercase ( self ) -> List[Any]: lowerCamelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase : Optional[int] = None lowerCamelCase : Dict = None if self.use_labels: lowerCamelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) lowerCamelCase : List[str] = self.get_config() return config, pixel_values, labels, pixel_labels def _lowercase ( self ) -> str: return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , out_indices=self.out_indices , ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Any: lowerCamelCase : Dict = BeitModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase : Optional[Any] = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]: lowerCamelCase : int = BeitForMaskedImageModeling(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase : Tuple = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple: lowerCamelCase : str = self.type_sequence_label_size lowerCamelCase : Optional[Any] = BeitForImageClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase : List[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCamelCase : List[Any] = 1 lowerCamelCase : Any = BeitForImageClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase : Any = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]: lowerCamelCase : Optional[int] = self.num_labels lowerCamelCase : str = BeitForSemanticSegmentation(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase : Tuple = model(UpperCamelCase__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) lowerCamelCase : Optional[int] = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def _lowercase ( self ) -> Optional[Any]: lowerCamelCase : int = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : List[str] = config_and_inputs lowerCamelCase : List[Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class UpperCamelCase__ (lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase_ : Optional[int] = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) lowerCamelCase_ : int = ( { """feature-extraction""": BeitModel, """image-classification""": BeitForImageClassification, """image-segmentation""": BeitForSemanticSegmentation, } if is_torch_available() else {} ) lowerCamelCase_ : Tuple = False lowerCamelCase_ : List[str] = False lowerCamelCase_ : str = False def _lowercase ( self ) -> List[str]: lowerCamelCase : Dict = BeitModelTester(self ) lowerCamelCase : List[Any] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 ) def _lowercase ( self ) -> Dict: self.config_tester.run_common_tests() @unittest.skip(reason="BEiT does not use inputs_embeds" ) def _lowercase ( self ) -> str: pass @require_torch_multi_gpu @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def _lowercase ( self ) -> Any: pass def _lowercase ( self ) -> str: lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : List[str] = model_class(UpperCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCamelCase : List[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) ) def _lowercase ( self ) -> Tuple: lowerCamelCase , lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : Optional[Any] = model_class(UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase : Optional[Any] = [*signature.parameters.keys()] lowerCamelCase : str = ["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def _lowercase ( self ) -> int: lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def _lowercase ( self ) -> Any: lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ ) def _lowercase ( self ) -> List[str]: lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ ) def _lowercase ( self ) -> Optional[Any]: lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase__ ) def _lowercase ( self ) -> Tuple: if not self.model_tester.is_training: return lowerCamelCase , lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : Tuple = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(UpperCamelCase__ ), BeitForMaskedImageModeling]: continue lowerCamelCase : int = model_class(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.train() lowerCamelCase : Dict = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) lowerCamelCase : Any = model(**UpperCamelCase__ ).loss loss.backward() def _lowercase ( self ) -> List[str]: lowerCamelCase , lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return lowerCamelCase : Optional[Any] = False lowerCamelCase : str = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(UpperCamelCase__ ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue lowerCamelCase : Dict = model_class(UpperCamelCase__ ) model.gradient_checkpointing_enable() model.to(UpperCamelCase__ ) model.train() lowerCamelCase : Union[str, Any] = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) lowerCamelCase : Optional[int] = model(**UpperCamelCase__ ).loss loss.backward() def _lowercase ( self ) -> int: lowerCamelCase , lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : str = _config_zero_init(UpperCamelCase__ ) for model_class in self.all_model_classes: lowerCamelCase : Any = model_class(config=UpperCamelCase__ ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @slow def _lowercase ( self ) -> List[str]: for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : Optional[Any] = BeitModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def A ( ) -> Tuple: lowerCamelCase : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @cached_property def _lowercase ( self ) -> Tuple: return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None @slow def _lowercase ( self ) -> str: lowerCamelCase : List[str] = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(UpperCamelCase__ ) lowerCamelCase : Any = self.default_image_processor lowerCamelCase : Union[str, Any] = prepare_img() lowerCamelCase : str = image_processor(images=UpperCamelCase__ , return_tensors="pt" ).pixel_values.to(UpperCamelCase__ ) # prepare bool_masked_pos lowerCamelCase : List[Any] = torch.ones((1, 196) , dtype=torch.bool ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): lowerCamelCase : Dict = model(pixel_values=UpperCamelCase__ , bool_masked_pos=UpperCamelCase__ ) lowerCamelCase : List[str] = outputs.logits # verify the logits lowerCamelCase : Dict = torch.Size((1, 196, 8192) ) self.assertEqual(logits.shape , UpperCamelCase__ ) lowerCamelCase : int = torch.tensor( [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , UpperCamelCase__ , atol=1e-2 ) ) @slow def _lowercase ( self ) -> Dict: lowerCamelCase : int = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(UpperCamelCase__ ) lowerCamelCase : Dict = self.default_image_processor lowerCamelCase : Optional[Any] = prepare_img() lowerCamelCase : int = image_processor(images=UpperCamelCase__ , return_tensors="pt" ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): lowerCamelCase : List[str] = model(**UpperCamelCase__ ) lowerCamelCase : Optional[Any] = outputs.logits # verify the logits lowerCamelCase : Optional[int] = torch.Size((1, 1000) ) self.assertEqual(logits.shape , UpperCamelCase__ ) lowerCamelCase : str = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) ) lowerCamelCase : List[str] = 281 self.assertEqual(logits.argmax(-1 ).item() , UpperCamelCase__ ) @slow def _lowercase ( self ) -> str: lowerCamelCase : Union[str, Any] = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to( UpperCamelCase__ ) lowerCamelCase : int = self.default_image_processor lowerCamelCase : Tuple = prepare_img() lowerCamelCase : List[Any] = image_processor(images=UpperCamelCase__ , return_tensors="pt" ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): lowerCamelCase : Any = model(**UpperCamelCase__ ) lowerCamelCase : Tuple = outputs.logits # verify the logits lowerCamelCase : int = torch.Size((1, 2_1841) ) self.assertEqual(logits.shape , UpperCamelCase__ ) lowerCamelCase : Any = torch.tensor([1.6881, -0.2787, 0.5901] ).to(UpperCamelCase__ ) self.assertTrue(torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) ) lowerCamelCase : str = 2396 self.assertEqual(logits.argmax(-1 ).item() , UpperCamelCase__ ) @slow def _lowercase ( self ) -> int: lowerCamelCase : int = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) lowerCamelCase : str = model.to(UpperCamelCase__ ) lowerCamelCase : Tuple = BeitImageProcessor(do_resize=UpperCamelCase__ , size=640 , do_center_crop=UpperCamelCase__ ) lowerCamelCase : Optional[int] = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) lowerCamelCase : str = Image.open(ds[0]["file"] ) lowerCamelCase : List[Any] = image_processor(images=UpperCamelCase__ , return_tensors="pt" ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): lowerCamelCase : Optional[int] = model(**UpperCamelCase__ ) lowerCamelCase : Optional[Any] = outputs.logits # verify the logits lowerCamelCase : List[str] = torch.Size((1, 150, 160, 160) ) self.assertEqual(logits.shape , UpperCamelCase__ ) lowerCamelCase : Any = version.parse(PIL.__version__ ) < version.parse("9.0.0" ) if is_pillow_less_than_a: lowerCamelCase : List[str] = torch.tensor( [ [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]], [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]], [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]], ] , device=UpperCamelCase__ , ) else: lowerCamelCase : str = torch.tensor( [ [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]], [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]], [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]], ] , device=UpperCamelCase__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1e-4 ) ) @slow def _lowercase ( self ) -> List[str]: lowerCamelCase : List[Any] = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) lowerCamelCase : Optional[int] = model.to(UpperCamelCase__ ) lowerCamelCase : int = BeitImageProcessor(do_resize=UpperCamelCase__ , size=640 , do_center_crop=UpperCamelCase__ ) lowerCamelCase : List[Any] = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) lowerCamelCase : str = Image.open(ds[0]["file"] ) lowerCamelCase : Any = image_processor(images=UpperCamelCase__ , return_tensors="pt" ).to(UpperCamelCase__ ) # forward pass with torch.no_grad(): lowerCamelCase : Dict = model(**UpperCamelCase__ ) lowerCamelCase : Optional[int] = outputs.logits.detach().cpu() lowerCamelCase : int = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(500, 300)] ) lowerCamelCase : int = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape , UpperCamelCase__ ) lowerCamelCase : List[str] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ ) lowerCamelCase : Optional[Any] = torch.Size((160, 160) ) self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
48
import random from .binary_exp_mod import bin_exp_mod def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=1000 ) -> List[str]: if n < 2: return False if n % 2 == 0: return n == 2 # this means n is odd lowerCamelCase : List[Any] = n - 1 lowerCamelCase : Dict = 0 while d % 2 == 0: d /= 2 exp += 1 # n - 1=d*(2**exp) lowerCamelCase : Optional[Any] = 0 while count < prec: lowerCamelCase : str = random.randint(2 ,n - 1 ) lowerCamelCase : Dict = bin_exp_mod(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) if b != 1: lowerCamelCase : str = True for _ in range(_SCREAMING_SNAKE_CASE ): if b == n - 1: lowerCamelCase : Tuple = False break lowerCamelCase : int = b * b b %= n if flag: return False count += 1 return True if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Optional[int] = abs(int(input('Enter bound : ').strip())) print('Here\'s the list of primes:') print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
48
1
from __future__ import annotations from math import pi, sqrt def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> tuple: if inductance <= 0: raise ValueError("Inductance cannot be 0 or negative" ) elif capacitance <= 0: raise ValueError("Capacitance cannot be 0 or negative" ) else: return ( "Resonant frequency", float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ), ) if __name__ == "__main__": import doctest doctest.testmod()
48
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Tuple = {'vocab_file': 'spiece.model'} SCREAMING_SNAKE_CASE__ : int = { 'vocab_file': { 'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model', 'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model', } } SCREAMING_SNAKE_CASE__ : str = { 'xlnet-base-cased': None, 'xlnet-large-cased': None, } # Segments (not really needed) SCREAMING_SNAKE_CASE__ : Dict = 0 SCREAMING_SNAKE_CASE__ : Tuple = 1 SCREAMING_SNAKE_CASE__ : Optional[int] = 2 SCREAMING_SNAKE_CASE__ : List[str] = 3 SCREAMING_SNAKE_CASE__ : Optional[int] = 4 class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Dict = VOCAB_FILES_NAMES lowerCamelCase_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase_ : List[str] = """left""" def __init__( self , UpperCamelCase__ , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__="<s>" , UpperCamelCase__="</s>" , UpperCamelCase__="<unk>" , UpperCamelCase__="<sep>" , UpperCamelCase__="<pad>" , UpperCamelCase__="<cls>" , UpperCamelCase__="<mask>" , UpperCamelCase__=["<eop>", "<eod>"] , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> None: # Mask token behave like a normal word, i.e. include the space before it lowerCamelCase : str = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token lowerCamelCase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , ) lowerCamelCase : Any = 3 lowerCamelCase : Optional[Any] = do_lower_case lowerCamelCase : List[Any] = remove_space lowerCamelCase : str = keep_accents lowerCamelCase : List[Any] = vocab_file lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCamelCase__ ) @property def _lowercase ( self ) -> Optional[Any]: return len(self.sp_model ) def _lowercase ( self ) -> Optional[int]: lowerCamelCase : int = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Optional[Any]: lowerCamelCase : Optional[int] = self.__dict__.copy() lowerCamelCase : Union[str, Any] = None return state def __setstate__( self , UpperCamelCase__ ) -> int: lowerCamelCase : int = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): lowerCamelCase : Any = {} lowerCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _lowercase ( self , UpperCamelCase__ ) -> Any: if self.remove_space: lowerCamelCase : Dict = " ".join(inputs.strip().split() ) else: lowerCamelCase : Union[str, Any] = inputs lowerCamelCase : Optional[Any] = outputs.replace("``" , "\"" ).replace("''" , "\"" ) if not self.keep_accents: lowerCamelCase : Optional[int] = unicodedata.normalize("NFKD" , UpperCamelCase__ ) lowerCamelCase : List[Any] = "".join([c for c in outputs if not unicodedata.combining(UpperCamelCase__ )] ) if self.do_lower_case: lowerCamelCase : List[str] = outputs.lower() return outputs def _lowercase ( self , UpperCamelCase__ ) -> List[str]: lowerCamelCase : Optional[Any] = self.preprocess_text(UpperCamelCase__ ) lowerCamelCase : Dict = self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ ) lowerCamelCase : Dict = [] for piece in pieces: if len(UpperCamelCase__ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit(): lowerCamelCase : List[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase__ , "" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: lowerCamelCase : Union[str, Any] = cur_pieces[1:] else: lowerCamelCase : Optional[int] = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(UpperCamelCase__ ) else: new_pieces.append(UpperCamelCase__ ) return new_pieces def _lowercase ( self , UpperCamelCase__ ) -> int: return self.sp_model.PieceToId(UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ ) -> Tuple: return self.sp_model.IdToPiece(UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ ) -> List[str]: lowerCamelCase : Union[str, Any] = "".join(UpperCamelCase__ ).replace(UpperCamelCase__ , " " ).strip() return out_string def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = True , **UpperCamelCase__ , ) -> str: lowerCamelCase : Optional[int] = kwargs.pop("use_source_tokenizer" , UpperCamelCase__ ) lowerCamelCase : Optional[int] = self.convert_ids_to_tokens(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. https://github.com/huggingface/transformers/issues/1133 lowerCamelCase : Any = [] lowerCamelCase : Any = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(UpperCamelCase__ ) ) lowerCamelCase : int = [] sub_texts.append(UpperCamelCase__ ) else: current_sub_text.append(UpperCamelCase__ ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(UpperCamelCase__ ) ) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens lowerCamelCase : Union[str, Any] = "".join(UpperCamelCase__ ) lowerCamelCase : Tuple = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: lowerCamelCase : int = self.clean_up_tokenization(UpperCamelCase__ ) return clean_text else: return text def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]: lowerCamelCase : str = [self.sep_token_id] lowerCamelCase : Optional[int] = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ ) if token_ids_a is not None: return ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1, 1] return ([0] * len(UpperCamelCase__ )) + [1, 1] def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]: lowerCamelCase : Any = [self.sep_token_id] lowerCamelCase : List[str] = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]: if not os.path.isdir(UpperCamelCase__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCamelCase : Union[str, Any] = os.path.join( UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase__ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase__ , "wb" ) as fi: lowerCamelCase : str = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase__ ) return (out_vocab_file,)
48
1
import copy from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class UpperCamelCase__ : '''simple docstring''' lowerCamelCase_ : Optional[Union[str, Path]] = None lowerCamelCase_ : bool = False lowerCamelCase_ : bool = False lowerCamelCase_ : bool = False lowerCamelCase_ : Optional[Dict] = None lowerCamelCase_ : Optional[str] = None lowerCamelCase_ : bool = False lowerCamelCase_ : bool = False lowerCamelCase_ : bool = False lowerCamelCase_ : bool = True lowerCamelCase_ : Optional[int] = None lowerCamelCase_ : int = 1 lowerCamelCase_ : Optional[Union[str, bool]] = None lowerCamelCase_ : bool = False lowerCamelCase_ : Optional[Dict] = None lowerCamelCase_ : Optional[str] = None def _lowercase ( self ) -> "DownloadConfig": return self.__class__(**{k: copy.deepcopy(UpperCamelCase__ ) for k, v in self.__dict__.items()} )
48
import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Tuple = { 'b0': efficientnet.EfficientNetBa, 'b1': efficientnet.EfficientNetBa, 'b2': efficientnet.EfficientNetBa, 'b3': efficientnet.EfficientNetBa, 'b4': efficientnet.EfficientNetBa, 'b5': efficientnet.EfficientNetBa, 'b6': efficientnet.EfficientNetBa, 'b7': efficientnet.EfficientNetBa, } SCREAMING_SNAKE_CASE__ : Any = { 'b0': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.0, 'image_size': 224, 'dropout_rate': 0.2, 'dw_padding': [], }, 'b1': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.1, 'image_size': 240, 'dropout_rate': 0.2, 'dw_padding': [16], }, 'b2': { 'hidden_dim': 1408, 'width_coef': 1.1, 'depth_coef': 1.2, 'image_size': 260, 'dropout_rate': 0.3, 'dw_padding': [5, 8, 16], }, 'b3': { 'hidden_dim': 1536, 'width_coef': 1.2, 'depth_coef': 1.4, 'image_size': 300, 'dropout_rate': 0.3, 'dw_padding': [5, 18], }, 'b4': { 'hidden_dim': 1792, 'width_coef': 1.4, 'depth_coef': 1.8, 'image_size': 380, 'dropout_rate': 0.4, 'dw_padding': [6], }, 'b5': { 'hidden_dim': 2048, 'width_coef': 1.6, 'depth_coef': 2.2, 'image_size': 456, 'dropout_rate': 0.4, 'dw_padding': [13, 27], }, 'b6': { 'hidden_dim': 2304, 'width_coef': 1.8, 'depth_coef': 2.6, 'image_size': 528, 'dropout_rate': 0.5, 'dw_padding': [31], }, 'b7': { 'hidden_dim': 2560, 'width_coef': 2.0, 'depth_coef': 3.1, 'image_size': 600, 'dropout_rate': 0.5, 'dw_padding': [18], }, } def A ( _SCREAMING_SNAKE_CASE ) -> str: lowerCamelCase : int = EfficientNetConfig() lowerCamelCase : List[str] = CONFIG_MAP[model_name]["hidden_dim"] lowerCamelCase : List[str] = CONFIG_MAP[model_name]["width_coef"] lowerCamelCase : Any = CONFIG_MAP[model_name]["depth_coef"] lowerCamelCase : Union[str, Any] = CONFIG_MAP[model_name]["image_size"] lowerCamelCase : Optional[int] = CONFIG_MAP[model_name]["dropout_rate"] lowerCamelCase : str = CONFIG_MAP[model_name]["dw_padding"] lowerCamelCase : Tuple = "huggingface/label-files" lowerCamelCase : List[str] = "imagenet-1k-id2label.json" lowerCamelCase : Any = 1000 lowerCamelCase : Any = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,repo_type="dataset" ) ,"r" ) ) lowerCamelCase : List[str] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowerCamelCase : Tuple = idalabel lowerCamelCase : Any = {v: k for k, v in idalabel.items()} return config def A ( ) -> int: lowerCamelCase : str = "http://images.cocodataset.org/val2017/000000039769.jpg" lowerCamelCase : Tuple = Image.open(requests.get(_SCREAMING_SNAKE_CASE ,stream=_SCREAMING_SNAKE_CASE ).raw ) return im def A ( _SCREAMING_SNAKE_CASE ) -> str: lowerCamelCase : List[Any] = CONFIG_MAP[model_name]["image_size"] lowerCamelCase : str = EfficientNetImageProcessor( size={"height": size, "width": size} ,image_mean=[0.485, 0.456, 0.406] ,image_std=[0.47853944, 0.4732864, 0.47434163] ,do_center_crop=_SCREAMING_SNAKE_CASE ,) return preprocessor def A ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: lowerCamelCase : Any = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )] lowerCamelCase : Any = sorted(set(_SCREAMING_SNAKE_CASE ) ) lowerCamelCase : Dict = len(_SCREAMING_SNAKE_CASE ) lowerCamelCase : List[Any] = {b: str(_SCREAMING_SNAKE_CASE ) for b, i in zip(_SCREAMING_SNAKE_CASE ,range(_SCREAMING_SNAKE_CASE ) )} lowerCamelCase : List[Any] = [] rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") ) rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") ) rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") ) rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") ) rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") ) for b in block_names: lowerCamelCase : Dict = block_name_mapping[b] rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") ) rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") ) rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") ) rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") ) rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") ) lowerCamelCase : Optional[int] = {} for item in rename_keys: if item[0] in original_param_names: lowerCamelCase : List[str] = "efficientnet." + item[1] lowerCamelCase : int = "classifier.weight" lowerCamelCase : Union[str, Any] = "classifier.bias" return key_mapping def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Dict: for key, value in tf_params.items(): if "normalization" in key: continue lowerCamelCase : Tuple = key_mapping[key] if "_conv" in key and "kernel" in key: lowerCamelCase : List[Any] = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(3 ,2 ,0 ,1 ) elif "depthwise_kernel" in key: lowerCamelCase : int = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(2 ,3 ,0 ,1 ) elif "kernel" in key: lowerCamelCase : List[str] = torch.from_numpy(np.transpose(_SCREAMING_SNAKE_CASE ) ) else: lowerCamelCase : Optional[Any] = torch.from_numpy(_SCREAMING_SNAKE_CASE ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_SCREAMING_SNAKE_CASE ) @torch.no_grad() def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[int]: lowerCamelCase : Optional[int] = model_classes[model_name]( include_top=_SCREAMING_SNAKE_CASE ,weights="imagenet" ,input_tensor=_SCREAMING_SNAKE_CASE ,input_shape=_SCREAMING_SNAKE_CASE ,pooling=_SCREAMING_SNAKE_CASE ,classes=1000 ,classifier_activation="softmax" ,) lowerCamelCase : List[Any] = original_model.trainable_variables lowerCamelCase : Tuple = original_model.non_trainable_variables lowerCamelCase : Union[str, Any] = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: lowerCamelCase : List[str] = param.numpy() lowerCamelCase : int = list(tf_params.keys() ) # Load HuggingFace model lowerCamelCase : Union[str, Any] = get_efficientnet_config(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Optional[int] = EfficientNetForImageClassification(_SCREAMING_SNAKE_CASE ).eval() lowerCamelCase : Tuple = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("Converting parameters..." ) lowerCamelCase : Union[str, Any] = rename_keys(_SCREAMING_SNAKE_CASE ) replace_params(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # Initialize preprocessor and preprocess input image lowerCamelCase : int = convert_image_processor(_SCREAMING_SNAKE_CASE ) lowerCamelCase : int = preprocessor(images=prepare_img() ,return_tensors="pt" ) # HF model inference hf_model.eval() with torch.no_grad(): lowerCamelCase : Optional[Any] = hf_model(**_SCREAMING_SNAKE_CASE ) lowerCamelCase : str = outputs.logits.detach().numpy() # Original model inference lowerCamelCase : Optional[Any] = False lowerCamelCase : Any = CONFIG_MAP[model_name]["image_size"] lowerCamelCase : Optional[int] = prepare_img().resize((image_size, image_size) ,resample=PIL.Image.NEAREST ) lowerCamelCase : Union[str, Any] = image.img_to_array(_SCREAMING_SNAKE_CASE ) lowerCamelCase : str = np.expand_dims(_SCREAMING_SNAKE_CASE ,axis=0 ) lowerCamelCase : Dict = original_model.predict(_SCREAMING_SNAKE_CASE ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,atol=1e-3 ), "The predicted logits are not the same." print("Model outputs match!" ) if save_model: # Create folder to save model if not os.path.isdir(_SCREAMING_SNAKE_CASE ): os.mkdir(_SCREAMING_SNAKE_CASE ) # Save converted model and image processor hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) preprocessor.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: # Push model and image processor to hub print(f'''Pushing converted {model_name} to the hub...''' ) lowerCamelCase : int = f'''efficientnet-{model_name}''' preprocessor.push_to_hub(_SCREAMING_SNAKE_CASE ) hf_model.push_to_hub(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='b0', type=str, help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].', ) parser.add_argument( '--pytorch_dump_folder_path', default='hf_model', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--save_model', action='store_true', help='Save model to local') parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
48
1
import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : str = (PNDMScheduler,) lowerCamelCase_ : Tuple = (("""num_inference_steps""", 5_0),) def _lowercase ( self , **UpperCamelCase__ ) -> Dict: lowerCamelCase : Optional[Any] = { "num_train_timesteps": 1000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**UpperCamelCase__ ) return config def _lowercase ( self , UpperCamelCase__=0 , **UpperCamelCase__ ) -> List[Any]: lowerCamelCase : List[Any] = dict(self.forward_default_kwargs ) lowerCamelCase : Optional[Any] = kwargs.pop("num_inference_steps" , UpperCamelCase__ ) lowerCamelCase : List[str] = self.dummy_sample lowerCamelCase : List[str] = 0.1 * sample lowerCamelCase : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: lowerCamelCase : str = self.get_scheduler_config(**UpperCamelCase__ ) lowerCamelCase : List[Any] = scheduler_class(**UpperCamelCase__ ) scheduler.set_timesteps(UpperCamelCase__ ) # copy over dummy past residuals lowerCamelCase : Optional[int] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = scheduler_class.from_pretrained(UpperCamelCase__ ) new_scheduler.set_timesteps(UpperCamelCase__ ) # copy over dummy past residuals lowerCamelCase : List[str] = dummy_past_residuals[:] lowerCamelCase : Optional[Any] = scheduler.step_prk(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample lowerCamelCase : Dict = new_scheduler.step_prk(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" lowerCamelCase : str = scheduler.step_plms(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample lowerCamelCase : Optional[int] = new_scheduler.step_plms(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def _lowercase ( self ) -> Optional[int]: pass def _lowercase ( self , UpperCamelCase__=0 , **UpperCamelCase__ ) -> Optional[Any]: lowerCamelCase : str = dict(self.forward_default_kwargs ) lowerCamelCase : Dict = kwargs.pop("num_inference_steps" , UpperCamelCase__ ) lowerCamelCase : Optional[Any] = self.dummy_sample lowerCamelCase : Optional[Any] = 0.1 * sample lowerCamelCase : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: lowerCamelCase : Tuple = self.get_scheduler_config() lowerCamelCase : List[str] = scheduler_class(**UpperCamelCase__ ) scheduler.set_timesteps(UpperCamelCase__ ) # copy over dummy past residuals (must be after setting timesteps) lowerCamelCase : List[Any] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(UpperCamelCase__ ) lowerCamelCase : Optional[Any] = scheduler_class.from_pretrained(UpperCamelCase__ ) # copy over dummy past residuals new_scheduler.set_timesteps(UpperCamelCase__ ) # copy over dummy past residual (must be after setting timesteps) lowerCamelCase : int = dummy_past_residuals[:] lowerCamelCase : List[str] = scheduler.step_prk(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample lowerCamelCase : Optional[Any] = new_scheduler.step_prk(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" lowerCamelCase : Tuple = scheduler.step_plms(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample lowerCamelCase : Tuple = new_scheduler.step_plms(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def _lowercase ( self , **UpperCamelCase__ ) -> List[str]: lowerCamelCase : Any = self.scheduler_classes[0] lowerCamelCase : Tuple = self.get_scheduler_config(**UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = scheduler_class(**UpperCamelCase__ ) lowerCamelCase : int = 10 lowerCamelCase : str = self.dummy_model() lowerCamelCase : Union[str, Any] = self.dummy_sample_deter scheduler.set_timesteps(UpperCamelCase__ ) for i, t in enumerate(scheduler.prk_timesteps ): lowerCamelCase : List[str] = model(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase : Optional[Any] = scheduler.step_prk(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): lowerCamelCase : List[str] = model(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase : Tuple = scheduler.step_plms(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample return sample def _lowercase ( self ) -> str: lowerCamelCase : Optional[Any] = dict(self.forward_default_kwargs ) lowerCamelCase : List[Any] = kwargs.pop("num_inference_steps" , UpperCamelCase__ ) for scheduler_class in self.scheduler_classes: lowerCamelCase : List[Any] = self.get_scheduler_config() lowerCamelCase : Dict = scheduler_class(**UpperCamelCase__ ) lowerCamelCase : str = self.dummy_sample lowerCamelCase : Union[str, Any] = 0.1 * sample if num_inference_steps is not None and hasattr(UpperCamelCase__ , "set_timesteps" ): scheduler.set_timesteps(UpperCamelCase__ ) elif num_inference_steps is not None and not hasattr(UpperCamelCase__ , "set_timesteps" ): lowerCamelCase : Union[str, Any] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) lowerCamelCase : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] lowerCamelCase : Union[str, Any] = dummy_past_residuals[:] lowerCamelCase : Any = scheduler.step_prk(UpperCamelCase__ , 0 , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample lowerCamelCase : Dict = scheduler.step_prk(UpperCamelCase__ , 1 , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) lowerCamelCase : Tuple = scheduler.step_plms(UpperCamelCase__ , 0 , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample lowerCamelCase : Any = scheduler.step_plms(UpperCamelCase__ , 1 , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def _lowercase ( self ) -> int: for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=UpperCamelCase__ ) def _lowercase ( self ) -> Tuple: for steps_offset in [0, 1]: self.check_over_configs(steps_offset=UpperCamelCase__ ) lowerCamelCase : int = self.scheduler_classes[0] lowerCamelCase : Union[str, Any] = self.get_scheduler_config(steps_offset=1 ) lowerCamelCase : List[str] = scheduler_class(**UpperCamelCase__ ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , ) def _lowercase ( self ) -> Tuple: for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ): self.check_over_configs(beta_start=UpperCamelCase__ , beta_end=UpperCamelCase__ ) def _lowercase ( self ) -> List[Any]: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=UpperCamelCase__ ) def _lowercase ( self ) -> str: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=UpperCamelCase__ ) def _lowercase ( self ) -> Any: for t in [1, 5, 10]: self.check_over_forward(time_step=UpperCamelCase__ ) def _lowercase ( self ) -> Any: for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=UpperCamelCase__ ) def _lowercase ( self ) -> Tuple: # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3 lowerCamelCase : Dict = 27 for scheduler_class in self.scheduler_classes: lowerCamelCase : Optional[int] = self.dummy_sample lowerCamelCase : Optional[Any] = 0.1 * sample lowerCamelCase : Optional[Any] = self.get_scheduler_config() lowerCamelCase : List[str] = scheduler_class(**UpperCamelCase__ ) scheduler.set_timesteps(UpperCamelCase__ ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): lowerCamelCase : Dict = scheduler.step_prk(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample def _lowercase ( self ) -> str: with self.assertRaises(UpperCamelCase__ ): lowerCamelCase : Optional[Any] = self.scheduler_classes[0] lowerCamelCase : int = self.get_scheduler_config() lowerCamelCase : int = scheduler_class(**UpperCamelCase__ ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def _lowercase ( self ) -> Union[str, Any]: lowerCamelCase : Dict = self.full_loop() lowerCamelCase : Optional[Any] = torch.sum(torch.abs(UpperCamelCase__ ) ) lowerCamelCase : Union[str, Any] = torch.mean(torch.abs(UpperCamelCase__ ) ) assert abs(result_sum.item() - 198.1318 ) < 1e-2 assert abs(result_mean.item() - 0.2580 ) < 1e-3 def _lowercase ( self ) -> Dict: lowerCamelCase : str = self.full_loop(prediction_type="v_prediction" ) lowerCamelCase : Optional[int] = torch.sum(torch.abs(UpperCamelCase__ ) ) lowerCamelCase : int = torch.mean(torch.abs(UpperCamelCase__ ) ) assert abs(result_sum.item() - 67.3986 ) < 1e-2 assert abs(result_mean.item() - 0.0878 ) < 1e-3 def _lowercase ( self ) -> Optional[int]: # We specify different beta, so that the first alpha is 0.99 lowerCamelCase : Optional[Any] = self.full_loop(set_alpha_to_one=UpperCamelCase__ , beta_start=0.01 ) lowerCamelCase : Dict = torch.sum(torch.abs(UpperCamelCase__ ) ) lowerCamelCase : List[str] = torch.mean(torch.abs(UpperCamelCase__ ) ) assert abs(result_sum.item() - 230.0399 ) < 1e-2 assert abs(result_mean.item() - 0.2995 ) < 1e-3 def _lowercase ( self ) -> List[Any]: # We specify different beta, so that the first alpha is 0.99 lowerCamelCase : List[str] = self.full_loop(set_alpha_to_one=UpperCamelCase__ , beta_start=0.01 ) lowerCamelCase : Tuple = torch.sum(torch.abs(UpperCamelCase__ ) ) lowerCamelCase : Optional[int] = torch.mean(torch.abs(UpperCamelCase__ ) ) assert abs(result_sum.item() - 186.9482 ) < 1e-2 assert abs(result_mean.item() - 0.2434 ) < 1e-3
48
import argparse from pathlib import Path from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,) -> List[str]: if config_name_or_path is None: lowerCamelCase : Any = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base" if generator_tokenizer_name_or_path is None: lowerCamelCase : Dict = generator_name_or_path if question_encoder_tokenizer_name_or_path is None: lowerCamelCase : Any = question_encoder_name_or_path lowerCamelCase : str = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration # Save model. lowerCamelCase : List[Any] = RagConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Optional[Any] = gen_config lowerCamelCase : Optional[Any] = question_encoder_config lowerCamelCase : List[Any] = model_class.from_pretrained_question_encoder_generator( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,config=_SCREAMING_SNAKE_CASE ) rag_model.save_pretrained(_SCREAMING_SNAKE_CASE ) # Sanity check. model_class.from_pretrained(_SCREAMING_SNAKE_CASE ) # Save tokenizers. lowerCamelCase : List[str] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" ) lowerCamelCase : int = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Any = argparse.ArgumentParser() parser.add_argument( '--model_type', choices=['rag_sequence', 'rag_token'], required=True, type=str, help='RAG model type: rag_sequence, rag_token', ) parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.') parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier') parser.add_argument( '--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier' ) parser.add_argument( '--generator_tokenizer_name_or_path', type=str, help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``', ) parser.add_argument( '--question_encoder_tokenizer_name_or_path', type=str, help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``', ) parser.add_argument( '--config_name_or_path', type=str, help=( 'Identifier of the model config to use, if not provided, resolves to a base config for a given' ' ``model_type``' ), ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args() SCREAMING_SNAKE_CASE__ : Optional[Any] = Path(args.dest) dest_dir.mkdir(exist_ok=True) consolidate( args.model_type, args.generator_name_or_path, args.question_encoder_name_or_path, dest_dir, args.config_name_or_path, args.generator_tokenizer_name_or_path, args.question_encoder_tokenizer_name_or_path, )
48
1
import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_gpu, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' lowerCamelCase_ : Optional[Any] = MODEL_FOR_MASKED_LM_MAPPING lowerCamelCase_ : List[Any] = TF_MODEL_FOR_MASKED_LM_MAPPING def _lowercase ( self ) -> List[Any]: super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): import torch torch.cuda.empty_cache() @require_tf def _lowercase ( self ) -> List[str]: lowerCamelCase : str = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf" ) lowerCamelCase : Dict = unmasker("My name is <mask>" ) self.assertEqual( nested_simplify(UpperCamelCase__ , decimals=6 ) , [ {"sequence": "My name is grouped", "score": 2.1e-05, "token": 3_8015, "token_str": " grouped"}, {"sequence": "My name is accuser", "score": 2.1e-05, "token": 2_5506, "token_str": " accuser"}, ] , ) lowerCamelCase : int = unmasker("The largest city in France is <mask>" ) self.assertEqual( nested_simplify(UpperCamelCase__ , decimals=6 ) , [ { "sequence": "The largest city in France is grouped", "score": 2.1e-05, "token": 3_8015, "token_str": " grouped", }, { "sequence": "The largest city in France is accuser", "score": 2.1e-05, "token": 2_5506, "token_str": " accuser", }, ] , ) lowerCamelCase : List[str] = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 ) self.assertEqual( nested_simplify(UpperCamelCase__ , decimals=6 ) , [ {"sequence": "My name is Clara", "score": 2e-05, "token": 1_3606, "token_str": " Clara"}, {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"}, {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"}, ] , ) @require_torch def _lowercase ( self ) -> List[str]: lowerCamelCase : List[Any] = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt" ) lowerCamelCase : Tuple = unmasker("My name is <mask>" ) self.assertEqual( nested_simplify(UpperCamelCase__ , decimals=6 ) , [ {"sequence": "My name is Maul", "score": 2.2e-05, "token": 3_5676, "token_str": " Maul"}, {"sequence": "My name isELS", "score": 2.2e-05, "token": 1_6416, "token_str": "ELS"}, ] , ) lowerCamelCase : Union[str, Any] = unmasker("The largest city in France is <mask>" ) self.assertEqual( nested_simplify(UpperCamelCase__ , decimals=6 ) , [ { "sequence": "The largest city in France is Maul", "score": 2.2e-05, "token": 3_5676, "token_str": " Maul", }, {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 1_6416, "token_str": "ELS"}, ] , ) lowerCamelCase : int = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 ) self.assertEqual( nested_simplify(UpperCamelCase__ , decimals=6 ) , [ {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"}, {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"}, {"sequence": "My name is Clara", "score": 2e-05, "token": 1_3606, "token_str": " Clara"}, ] , ) lowerCamelCase : int = unmasker("My name is <mask> <mask>" , top_k=2 ) self.assertEqual( nested_simplify(UpperCamelCase__ , decimals=6 ) , [ [ { "score": 2.2e-05, "token": 3_5676, "token_str": " Maul", "sequence": "<s>My name is Maul<mask></s>", }, {"score": 2.2e-05, "token": 1_6416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"}, ], [ { "score": 2.2e-05, "token": 3_5676, "token_str": " Maul", "sequence": "<s>My name is<mask> Maul</s>", }, {"score": 2.2e-05, "token": 1_6416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"}, ], ] , ) @require_torch_gpu def _lowercase ( self ) -> Dict: lowerCamelCase : Any = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt" ) # convert model to fp16 pipe.model.half() lowerCamelCase : Tuple = pipe("Paris is the [MASK] of France." ) # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) @slow @require_torch def _lowercase ( self ) -> List[Any]: lowerCamelCase : Tuple = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" ) self.run_large_test(UpperCamelCase__ ) @slow @require_tf def _lowercase ( self ) -> str: lowerCamelCase : Tuple = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" ) self.run_large_test(UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ ) -> Optional[int]: lowerCamelCase : List[Any] = unmasker("My name is <mask>" ) self.assertEqual( nested_simplify(UpperCamelCase__ ) , [ {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"}, {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"}, ] , ) lowerCamelCase : List[Any] = unmasker("The largest city in France is <mask>" ) self.assertEqual( nested_simplify(UpperCamelCase__ ) , [ { "sequence": "The largest city in France is Paris", "score": 0.251, "token": 2201, "token_str": " Paris", }, { "sequence": "The largest city in France is Lyon", "score": 0.214, "token": 1_2790, "token_str": " Lyon", }, ] , ) lowerCamelCase : int = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 ) self.assertEqual( nested_simplify(UpperCamelCase__ ) , [ {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"}, {"sequence": "My name is Clara", "score": 0.000, "token": 1_3606, "token_str": " Clara"}, {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"}, ] , ) @require_torch def _lowercase ( self ) -> List[str]: lowerCamelCase : Union[str, Any] = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt" ) lowerCamelCase : List[str] = None lowerCamelCase : Dict = None self.run_pipeline_test(UpperCamelCase__ , [] ) @require_tf def _lowercase ( self ) -> Optional[Any]: lowerCamelCase : Optional[int] = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf" ) lowerCamelCase : int = None lowerCamelCase : List[Any] = None self.run_pipeline_test(UpperCamelCase__ , [] ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]: if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)" ) lowerCamelCase : Union[str, Any] = FillMaskPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = [ F'''This is another {tokenizer.mask_token} test''', ] return fill_masker, examples def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> int: lowerCamelCase : Tuple = fill_masker.tokenizer lowerCamelCase : Optional[Any] = fill_masker.model lowerCamelCase : Optional[int] = fill_masker( F'''This is a {tokenizer.mask_token}''' , ) self.assertEqual( UpperCamelCase__ , [ {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, ] , ) lowerCamelCase : int = fill_masker([F'''This is a {tokenizer.mask_token}'''] ) self.assertEqual( UpperCamelCase__ , [ {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, ] , ) lowerCamelCase : Any = fill_masker([F'''This is a {tokenizer.mask_token}''', F'''Another {tokenizer.mask_token} great test.'''] ) self.assertEqual( UpperCamelCase__ , [ [ {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, ], [ {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, ], ] , ) with self.assertRaises(UpperCamelCase__ ): fill_masker([None] ) # No mask_token is not supported with self.assertRaises(UpperCamelCase__ ): fill_masker("This is" ) self.run_test_top_k(UpperCamelCase__ , UpperCamelCase__ ) self.run_test_targets(UpperCamelCase__ , UpperCamelCase__ ) self.run_test_top_k_targets(UpperCamelCase__ , UpperCamelCase__ ) self.fill_mask_with_duplicate_targets_and_top_k(UpperCamelCase__ , UpperCamelCase__ ) self.fill_mask_with_multiple_masks(UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Any: lowerCamelCase : Dict = tokenizer.get_vocab() lowerCamelCase : Tuple = sorted(vocab.keys() )[:2] # Pipeline argument lowerCamelCase : int = FillMaskPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ , targets=UpperCamelCase__ ) lowerCamelCase : List[Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' ) self.assertEqual( UpperCamelCase__ , [ {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, ] , ) lowerCamelCase : List[str] = {vocab[el] for el in targets} self.assertEqual({el["token"] for el in outputs} , UpperCamelCase__ ) lowerCamelCase : Any = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el["token_str"] for el in outputs} , set(UpperCamelCase__ ) ) # Call argument lowerCamelCase : Tuple = FillMaskPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ ) lowerCamelCase : Tuple = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=UpperCamelCase__ ) self.assertEqual( UpperCamelCase__ , [ {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, ] , ) lowerCamelCase : List[str] = {vocab[el] for el in targets} self.assertEqual({el["token"] for el in outputs} , UpperCamelCase__ ) lowerCamelCase : List[str] = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el["token_str"] for el in outputs} , set(UpperCamelCase__ ) ) # Score equivalence lowerCamelCase : Optional[Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = [top_mask["token_str"] for top_mask in outputs] lowerCamelCase : Union[str, Any] = [top_mask["score"] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. if set(UpperCamelCase__ ) == set(UpperCamelCase__ ): lowerCamelCase : Any = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=UpperCamelCase__ ) lowerCamelCase : List[str] = [top_mask["score"] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(UpperCamelCase__ ) , nested_simplify(UpperCamelCase__ ) ) # Raises with invalid with self.assertRaises(UpperCamelCase__ ): lowerCamelCase : Any = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[] ) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised if "" not in tokenizer.get_vocab(): with self.assertRaises(UpperCamelCase__ ): lowerCamelCase : int = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[""] ) with self.assertRaises(UpperCamelCase__ ): lowerCamelCase : int = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets="" ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]: lowerCamelCase : Union[str, Any] = FillMaskPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ , top_k=2 ) lowerCamelCase : Union[str, Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' ) self.assertEqual( UpperCamelCase__ , [ {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, ] , ) lowerCamelCase : Union[str, Any] = FillMaskPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ ) lowerCamelCase : int = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 ) self.assertEqual( UpperCamelCase__ , [ {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, ] , ) self.assertEqual(nested_simplify(UpperCamelCase__ ) , nested_simplify(UpperCamelCase__ ) ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> int: lowerCamelCase : Tuple = tokenizer.get_vocab() lowerCamelCase : Any = FillMaskPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ ) # top_k=2, ntargets=3 lowerCamelCase : Any = sorted(vocab.keys() )[:3] lowerCamelCase : Dict = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=UpperCamelCase__ ) # If we use the most probably targets, and filter differently, we should still # have the same results lowerCamelCase : List[Any] = [el["token_str"] for el in sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : x["score"] , reverse=UpperCamelCase__ )] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. if set(UpperCamelCase__ ).issubset(UpperCamelCase__ ): lowerCamelCase : int = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=UpperCamelCase__ ) # They should yield exactly the same result self.assertEqual(nested_simplify(UpperCamelCase__ ) , nested_simplify(UpperCamelCase__ ) ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> str: lowerCamelCase : int = FillMaskPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ ) lowerCamelCase : str = tokenizer.get_vocab() # String duplicates + id duplicates lowerCamelCase : Tuple = sorted(vocab.keys() )[:3] lowerCamelCase : List[str] = [targets[0], targets[1], targets[0], targets[2], targets[1]] lowerCamelCase : int = fill_masker(F'''My name is {tokenizer.mask_token}''' , targets=UpperCamelCase__ , top_k=10 ) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(UpperCamelCase__ ) , 3 ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Dict: lowerCamelCase : int = FillMaskPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ ) lowerCamelCase : Tuple = fill_masker( F'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 ) self.assertEqual( UpperCamelCase__ , [ [ {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, ], [ {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, ], [ {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, {"sequence": ANY(UpperCamelCase__ ), "score": ANY(UpperCamelCase__ ), "token": ANY(UpperCamelCase__ ), "token_str": ANY(UpperCamelCase__ )}, ], ] , )
48
import math def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> float: if ( not isinstance(_SCREAMING_SNAKE_CASE ,(int, float) ) or power_factor < -1 or power_factor > 1 ): raise ValueError("power_factor must be a valid float value between -1 and 1." ) return apparent_power * power_factor def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> float: if ( not isinstance(_SCREAMING_SNAKE_CASE ,(int, float) ) or power_factor < -1 or power_factor > 1 ): raise ValueError("power_factor must be a valid float value between -1 and 1." ) return apparent_power * math.sqrt(1 - power_factor**2 ) if __name__ == "__main__": import doctest doctest.testmod()
48
1
import functools def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> int: lowerCamelCase : Tuple = len(_SCREAMING_SNAKE_CASE ) lowerCamelCase : List[str] = len(_SCREAMING_SNAKE_CASE ) @functools.cache def min_distance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa lowerCamelCase : Optional[Any] = int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 ,_SCREAMING_SNAKE_CASE ) ,1 + min_distance(_SCREAMING_SNAKE_CASE ,indexa + 1 ) ,diff + min_distance(indexa + 1 ,indexa + 1 ) ,) return min_distance(0 ,0 ) if __name__ == "__main__": import doctest doctest.testmod()
48
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__) def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ) -> Any: lowerCamelCase : Any = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ("cls_token", "deit.embeddings.cls_token"), ("dist_token", "deit.embeddings.distillation_token"), ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"), ("pos_embed", "deit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "deit" from all keys that start with "deit" lowerCamelCase : Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ("norm.weight", "deit.layernorm.weight"), ("norm.bias", "deit.layernorm.bias"), ("head.weight", "cls_classifier.weight"), ("head.bias", "cls_classifier.bias"), ("head_dist.weight", "distillation_classifier.weight"), ("head_dist.bias", "distillation_classifier.bias"), ] ) return rename_keys def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ) -> str: for i in range(config.num_hidden_layers ): if base_model: lowerCamelCase : Optional[int] = "" else: lowerCamelCase : List[str] = "deit." # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCamelCase : List[str] = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) lowerCamelCase : Optional[int] = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict lowerCamelCase : List[Any] = in_proj_weight[ : config.hidden_size, : ] lowerCamelCase : Any = in_proj_bias[: config.hidden_size] lowerCamelCase : List[str] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCamelCase : Optional[Any] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCamelCase : List[str] = in_proj_weight[ -config.hidden_size :, : ] lowerCamelCase : List[Any] = in_proj_bias[-config.hidden_size :] def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> str: lowerCamelCase : List[str] = dct.pop(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Any = val def A ( ) -> List[str]: lowerCamelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" lowerCamelCase : str = Image.open(requests.get(_SCREAMING_SNAKE_CASE ,stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]: lowerCamelCase : Union[str, Any] = DeiTConfig() # all deit models have fine-tuned heads lowerCamelCase : Optional[int] = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size lowerCamelCase : Dict = 1000 lowerCamelCase : Tuple = "huggingface/label-files" lowerCamelCase : List[str] = "imagenet-1k-id2label.json" lowerCamelCase : List[Any] = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,repo_type="dataset" ) ,"r" ) ) lowerCamelCase : Optional[int] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowerCamelCase : Tuple = idalabel lowerCamelCase : str = {v: k for k, v in idalabel.items()} lowerCamelCase : Dict = int(deit_name[-6:-4] ) lowerCamelCase : Optional[Any] = int(deit_name[-3:] ) # size of the architecture if deit_name[9:].startswith("tiny" ): lowerCamelCase : Optional[Any] = 192 lowerCamelCase : List[str] = 768 lowerCamelCase : Tuple = 12 lowerCamelCase : Optional[Any] = 3 elif deit_name[9:].startswith("small" ): lowerCamelCase : str = 384 lowerCamelCase : Optional[Any] = 1536 lowerCamelCase : Dict = 12 lowerCamelCase : Optional[int] = 6 if deit_name[9:].startswith("base" ): pass elif deit_name[4:].startswith("large" ): lowerCamelCase : str = 1024 lowerCamelCase : List[str] = 4096 lowerCamelCase : Any = 24 lowerCamelCase : Dict = 16 # load original model from timm lowerCamelCase : List[Any] = timm.create_model(_SCREAMING_SNAKE_CASE ,pretrained=_SCREAMING_SNAKE_CASE ) timm_model.eval() # load state_dict of original model, remove and rename some keys lowerCamelCase : Dict = timm_model.state_dict() lowerCamelCase : Dict = create_rename_keys(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) read_in_q_k_v(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # load HuggingFace model lowerCamelCase : Optional[Any] = DeiTForImageClassificationWithTeacher(_SCREAMING_SNAKE_CASE ).eval() model.load_state_dict(_SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by DeiTImageProcessor lowerCamelCase : Any = int( (256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 lowerCamelCase : Union[str, Any] = DeiTImageProcessor(size=_SCREAMING_SNAKE_CASE ,crop_size=config.image_size ) lowerCamelCase : str = image_processor(images=prepare_img() ,return_tensors="pt" ) lowerCamelCase : int = encoding["pixel_values"] lowerCamelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Union[str, Any] = timm_model(_SCREAMING_SNAKE_CASE ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_SCREAMING_SNAKE_CASE ,outputs.logits ,atol=1e-3 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(f'''Saving model {deit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--deit_name', default='vit_deit_base_distilled_patch16_224', type=str, help='Name of the DeiT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
48
1
import json import os import unittest from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors @require_tokenizers class UpperCamelCase__ (lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase_ : str = MvpTokenizer lowerCamelCase_ : List[Any] = MvpTokenizerFast lowerCamelCase_ : str = True lowerCamelCase_ : str = filter_roberta_detectors def _lowercase ( self ) -> Any: super().setUp() lowerCamelCase : List[str] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] lowerCamelCase : Union[str, Any] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) ) lowerCamelCase : Union[str, Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] lowerCamelCase : Optional[Any] = {"unk_token": "<unk>"} lowerCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) lowerCamelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(UpperCamelCase__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(UpperCamelCase__ ) ) def _lowercase ( self , **UpperCamelCase__ ) -> Optional[Any]: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def _lowercase ( self , **UpperCamelCase__ ) -> Optional[Any]: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ ) -> Tuple: return "lower newer", "lower newer" @cached_property def _lowercase ( self ) -> Tuple: return MvpTokenizer.from_pretrained("RUCAIBox/mvp" ) @cached_property def _lowercase ( self ) -> Optional[Any]: return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp" ) @require_torch def _lowercase ( self ) -> List[str]: lowerCamelCase : Optional[Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."] lowerCamelCase : Dict = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCamelCase : Union[str, Any] = tokenizer(UpperCamelCase__ , max_length=len(UpperCamelCase__ ) , padding=UpperCamelCase__ , return_tensors="pt" ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) lowerCamelCase : Optional[int] = batch.input_ids.tolist()[0] self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) # Test that special tokens are reset @require_torch def _lowercase ( self ) -> List[Any]: lowerCamelCase : str = ["A long paragraph for summarization.", "Another paragraph for summarization."] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCamelCase : str = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors="pt" ) # check if input_ids are returned and no labels self.assertIn("input_ids" , UpperCamelCase__ ) self.assertIn("attention_mask" , UpperCamelCase__ ) self.assertNotIn("labels" , UpperCamelCase__ ) self.assertNotIn("decoder_attention_mask" , UpperCamelCase__ ) @require_torch def _lowercase ( self ) -> Optional[Any]: lowerCamelCase : List[str] = [ "Summary of the text.", "Another summary.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCamelCase : Optional[Any] = tokenizer(text_target=UpperCamelCase__ , max_length=32 , padding="max_length" , return_tensors="pt" ) self.assertEqual(32 , targets["input_ids"].shape[1] ) @require_torch def _lowercase ( self ) -> Dict: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCamelCase : Union[str, Any] = tokenizer( ["I am a small frog" * 1024, "I am a small frog"] , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors="pt" ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(batch.input_ids.shape , (2, 1024) ) @require_torch def _lowercase ( self ) -> int: lowerCamelCase : str = ["A long paragraph for summarization."] lowerCamelCase : List[str] = [ "Summary of the text.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCamelCase : str = tokenizer(UpperCamelCase__ , text_target=UpperCamelCase__ , return_tensors="pt" ) lowerCamelCase : Union[str, Any] = inputs["input_ids"] lowerCamelCase : List[str] = inputs["labels"] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) def _lowercase ( self ) -> Optional[Any]: pass def _lowercase ( self ) -> Union[str, Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowerCamelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ ) lowerCamelCase : List[Any] = self.tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ ) lowerCamelCase : Optional[Any] = "A, <mask> AllenNLP sentence." lowerCamelCase : str = tokenizer_r.encode_plus(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ ) lowerCamelCase : List[Any] = tokenizer_p.encode_plus(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , ) lowerCamelCase : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] ) lowerCamelCase : List[str] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual( UpperCamelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( UpperCamelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
48
import random def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> tuple: lowerCamelCase , lowerCamelCase , lowerCamelCase : Any = [], [], [] for element in data: if element < pivot: less.append(_SCREAMING_SNAKE_CASE ) elif element > pivot: greater.append(_SCREAMING_SNAKE_CASE ) else: equal.append(_SCREAMING_SNAKE_CASE ) return less, equal, greater def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> str: # index = len(items) // 2 when trying to find the median # (value of index when items is sorted) # invalid input if index >= len(_SCREAMING_SNAKE_CASE ) or index < 0: return None lowerCamelCase : List[Any] = items[random.randint(0 ,len(_SCREAMING_SNAKE_CASE ) - 1 )] lowerCamelCase : Dict = 0 lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = _partition(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) lowerCamelCase : Union[str, Any] = len(_SCREAMING_SNAKE_CASE ) lowerCamelCase : str = len(_SCREAMING_SNAKE_CASE ) # index is the pivot if m <= index < m + count: return pivot # must be in smaller elif m > index: return quick_select(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # must be in larger else: return quick_select(_SCREAMING_SNAKE_CASE ,index - (m + count) )
48
1
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Union[str, Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} SCREAMING_SNAKE_CASE__ : Tuple = { 'vocab_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } SCREAMING_SNAKE_CASE__ : int = { 'vocab_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } SCREAMING_SNAKE_CASE__ : Any = { 'vocab_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json' ), }, } SCREAMING_SNAKE_CASE__ : str = { 'facebook/dpr-ctx_encoder-single-nq-base': 512, 'facebook/dpr-ctx_encoder-multiset-base': 512, } SCREAMING_SNAKE_CASE__ : Dict = { 'facebook/dpr-question_encoder-single-nq-base': 512, 'facebook/dpr-question_encoder-multiset-base': 512, } SCREAMING_SNAKE_CASE__ : Optional[int] = { 'facebook/dpr-reader-single-nq-base': 512, 'facebook/dpr-reader-multiset-base': 512, } SCREAMING_SNAKE_CASE__ : int = { 'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True}, } SCREAMING_SNAKE_CASE__ : List[str] = { 'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True}, } SCREAMING_SNAKE_CASE__ : Optional[int] = { 'facebook/dpr-reader-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-reader-multiset-base': {'do_lower_case': True}, } class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Optional[int] = VOCAB_FILES_NAMES lowerCamelCase_ : Optional[int] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowerCamelCase_ : Union[str, Any] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase_ : Dict = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Optional[Any] = VOCAB_FILES_NAMES lowerCamelCase_ : Optional[Any] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowerCamelCase_ : List[Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase_ : Optional[Any] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE__ : Optional[Any] = collections.namedtuple( 'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text'] ) SCREAMING_SNAKE_CASE__ : Tuple = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits']) SCREAMING_SNAKE_CASE__ : Tuple = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n ' @add_start_docstrings(lowerCAmelCase__ ) class UpperCamelCase__ : '''simple docstring''' def __call__( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> BatchEncoding: if titles is None and texts is None: return super().__call__( UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , return_tensors=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , **UpperCamelCase__ , ) elif titles is None or texts is None: lowerCamelCase : Optional[Any] = titles if texts is None else texts return super().__call__( UpperCamelCase__ , UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , return_tensors=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , **UpperCamelCase__ , ) lowerCamelCase : Union[str, Any] = titles if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) else [titles] lowerCamelCase : int = texts if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) else [texts] lowerCamelCase : List[Any] = len(UpperCamelCase__ ) lowerCamelCase : int = questions if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) else [questions] * n_passages if len(UpperCamelCase__ ) != len(UpperCamelCase__ ): raise ValueError( F'''There should be as many titles than texts but got {len(UpperCamelCase__ )} titles and {len(UpperCamelCase__ )} texts.''' ) lowerCamelCase : Any = super().__call__(UpperCamelCase__ , UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ )["input_ids"] lowerCamelCase : Any = super().__call__(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ )["input_ids"] lowerCamelCase : int = { "input_ids": [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(UpperCamelCase__ , UpperCamelCase__ ) ] } if return_attention_mask is not False: lowerCamelCase : List[Any] = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) lowerCamelCase : List[str] = attention_mask return self.pad(UpperCamelCase__ , padding=UpperCamelCase__ , max_length=UpperCamelCase__ , return_tensors=UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 16 , UpperCamelCase__ = 64 , UpperCamelCase__ = 4 , ) -> List[DPRSpanPrediction]: lowerCamelCase : Any = reader_input["input_ids"] lowerCamelCase , lowerCamelCase , lowerCamelCase : Dict = reader_output[:3] lowerCamelCase : str = len(UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = sorted(range(UpperCamelCase__ ) , reverse=UpperCamelCase__ , key=relevance_logits.__getitem__ ) lowerCamelCase : List[DPRReaderOutput] = [] for doc_id in sorted_docs: lowerCamelCase : int = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence lowerCamelCase : Union[str, Any] = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: lowerCamelCase : Any = sequence_ids.index(self.pad_token_id ) else: lowerCamelCase : Tuple = len(UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=UpperCamelCase__ , top_spans=UpperCamelCase__ , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=UpperCamelCase__ , start_index=UpperCamelCase__ , end_index=UpperCamelCase__ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(UpperCamelCase__ ) >= num_spans: break return nbest_spans_predictions[:num_spans] def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> List[DPRSpanPrediction]: lowerCamelCase : Any = [] for start_index, start_score in enumerate(UpperCamelCase__ ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) lowerCamelCase : List[Any] = sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : x[1] , reverse=UpperCamelCase__ ) lowerCamelCase : Optional[Any] = [] for (start_index, end_index), score in scores: if start_index > end_index: raise ValueError(F'''Wrong span indices: [{start_index}:{end_index}]''' ) lowerCamelCase : Any = end_index - start_index + 1 if length > max_answer_length: raise ValueError(F'''Span is too long: {length} > {max_answer_length}''' ) if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(UpperCamelCase__ ) == top_spans: break return chosen_span_intervals @add_end_docstrings(lowerCAmelCase__ ) class UpperCamelCase__ (lowerCAmelCase__ , lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Tuple = VOCAB_FILES_NAMES lowerCamelCase_ : List[Any] = READER_PRETRAINED_VOCAB_FILES_MAP lowerCamelCase_ : int = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase_ : Tuple = READER_PRETRAINED_INIT_CONFIGURATION lowerCamelCase_ : List[str] = ["""input_ids""", """attention_mask"""]
48
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> int: return x if y == 0 else greatest_common_divisor(_SCREAMING_SNAKE_CASE ,x % y ) def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> int: return (x * y) // greatest_common_divisor(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) def A ( _SCREAMING_SNAKE_CASE = 20 ) -> int: lowerCamelCase : List[Any] = 1 for i in range(1 ,n + 1 ): lowerCamelCase : List[str] = lcm(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) return g if __name__ == "__main__": print(f'''{solution() = }''')
48
1
import operator as op def A ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: lowerCamelCase : Any = [] lowerCamelCase : List[str] = lambda _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE : int(x / y ) # noqa: E731 integer division operation lowerCamelCase : Tuple = { "^": op.pow, "*": op.mul, "/": div, "+": op.add, "-": op.sub, } # operators & their respective operation # print table header print("Symbol".center(8 ) ,"Action".center(12 ) ,"Stack" ,sep=" | " ) print("-" * (30 + len(_SCREAMING_SNAKE_CASE )) ) for x in post_fix: if x.isdigit(): # if x in digit stack.append(_SCREAMING_SNAKE_CASE ) # append x to stack # output in tabular format print(x.rjust(8 ) ,("push(" + x + ")").ljust(12 ) ,",".join(_SCREAMING_SNAKE_CASE ) ,sep=" | " ) else: lowerCamelCase : Optional[int] = stack.pop() # pop stack # output in tabular format print("".rjust(8 ) ,("pop(" + b + ")").ljust(12 ) ,",".join(_SCREAMING_SNAKE_CASE ) ,sep=" | " ) lowerCamelCase : int = stack.pop() # pop stack # output in tabular format print("".rjust(8 ) ,("pop(" + a + ")").ljust(12 ) ,",".join(_SCREAMING_SNAKE_CASE ) ,sep=" | " ) stack.append( str(opr[x](int(_SCREAMING_SNAKE_CASE ) ,int(_SCREAMING_SNAKE_CASE ) ) ) ) # evaluate the 2 values popped from stack & push result to stack # output in tabular format print( x.rjust(8 ) ,("push(" + a + x + b + ")").ljust(12 ) ,",".join(_SCREAMING_SNAKE_CASE ) ,sep=" | " ,) return int(stack[0] ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Tuple = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ') print('\n\tResult = ', solve(Postfix))
48
import os import tempfile import unittest from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter from transformers.testing_utils import slow from transformers.utils import cached_property @unittest.skipUnless(os.path.exists(lowerCAmelCase__ ) , """Tatoeba directory does not exist.""" ) class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @cached_property def _lowercase ( self ) -> int: lowerCamelCase : str = tempfile.mkdtemp() return TatoebaConverter(save_dir=UpperCamelCase__ ) @slow def _lowercase ( self ) -> List[Any]: self.resolver.convert_models(["heb-eng"] ) @slow def _lowercase ( self ) -> Tuple: lowerCamelCase , lowerCamelCase : Dict = self.resolver.write_model_card("opus-mt-he-en" , dry_run=UpperCamelCase__ ) assert mmeta["long_pair"] == "heb-eng"
48
1
import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' def __init__( self , *UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> Tuple: super().__init__(*UpperCamelCase__ , **UpperCamelCase__ ) lowerCamelCase : Optional[int] = eval_examples lowerCamelCase : Union[str, Any] = post_process_function def _lowercase ( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__ = "eval" ) -> Optional[int]: lowerCamelCase : Optional[int] = self.eval_dataset if eval_dataset is None else eval_dataset lowerCamelCase : str = self.get_eval_dataloader(UpperCamelCase__ ) lowerCamelCase : int = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. lowerCamelCase : List[str] = self.compute_metrics lowerCamelCase : List[str] = None lowerCamelCase : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop lowerCamelCase : Dict = time.time() try: lowerCamelCase : Union[str, Any] = eval_loop( UpperCamelCase__ , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase__ , metric_key_prefix=UpperCamelCase__ , ) finally: lowerCamelCase : Tuple = compute_metrics lowerCamelCase : Union[str, Any] = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( UpperCamelCase__ , UpperCamelCase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default lowerCamelCase : int = self.post_process_function(UpperCamelCase__ , UpperCamelCase__ , output.predictions ) lowerCamelCase : int = self.compute_metrics(UpperCamelCase__ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): lowerCamelCase : List[str] = metrics.pop(UpperCamelCase__ ) metrics.update(output.metrics ) else: lowerCamelCase : Optional[Any] = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(UpperCamelCase__ ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) lowerCamelCase : Tuple = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase__ ) return metrics def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__ = "test" ) -> Union[str, Any]: lowerCamelCase : Any = self.get_test_dataloader(UpperCamelCase__ ) # Temporarily disable metric computation, we will do it in the loop here. lowerCamelCase : Tuple = self.compute_metrics lowerCamelCase : Dict = None lowerCamelCase : int = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop lowerCamelCase : Optional[int] = time.time() try: lowerCamelCase : Optional[int] = eval_loop( UpperCamelCase__ , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase__ , metric_key_prefix=UpperCamelCase__ , ) finally: lowerCamelCase : Dict = compute_metrics lowerCamelCase : int = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( UpperCamelCase__ , UpperCamelCase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output lowerCamelCase : Any = self.post_process_function(UpperCamelCase__ , UpperCamelCase__ , output.predictions , "predict" ) lowerCamelCase : Union[str, Any] = self.compute_metrics(UpperCamelCase__ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): lowerCamelCase : List[str] = metrics.pop(UpperCamelCase__ ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase__ )
48
import argparse from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta from transformers.utils import logging logging.set_verbosity_info() def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Dict: # Initialise PyTorch model lowerCamelCase : Any = TaConfig.from_json_file(_SCREAMING_SNAKE_CASE ) print(f'''Building PyTorch model from configuration: {config}''' ) lowerCamelCase : str = TaForConditionalGeneration(_SCREAMING_SNAKE_CASE ) # Load weights from tf checkpoint load_tf_weights_in_ta(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # Save pytorch-model print(f'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) SCREAMING_SNAKE_CASE__ : str = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
48
1
from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__) @add_end_docstrings(lowerCAmelCase__ ) class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' def __init__( self , **UpperCamelCase__ ) -> Optional[Any]: super().__init__(**UpperCamelCase__ ) requires_backends(self , "vision" ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == "tf" else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self , UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]: return super().__call__(UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self , **UpperCamelCase__ ) -> List[Any]: lowerCamelCase : Optional[int] = {} if "candidate_labels" in kwargs: lowerCamelCase : str = kwargs["candidate_labels"] if "hypothesis_template" in kwargs: lowerCamelCase : str = kwargs["hypothesis_template"] return preprocess_params, {}, {} def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__="This is a photo of {}." ) -> List[Any]: lowerCamelCase : Optional[Any] = load_image(UpperCamelCase__ ) lowerCamelCase : List[Any] = self.image_processor(images=[image] , return_tensors=self.framework ) lowerCamelCase : Dict = candidate_labels lowerCamelCase : Dict = [hypothesis_template.format(UpperCamelCase__ ) for x in candidate_labels] lowerCamelCase : Dict = self.tokenizer(UpperCamelCase__ , return_tensors=self.framework , padding=UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = [text_inputs] return inputs def _lowercase ( self , UpperCamelCase__ ) -> Union[str, Any]: lowerCamelCase : List[str] = model_inputs.pop("candidate_labels" ) lowerCamelCase : Dict = model_inputs.pop("text_inputs" ) if isinstance(text_inputs[0] , UpperCamelCase__ ): lowerCamelCase : Dict = text_inputs[0] else: # Batching case. lowerCamelCase : int = text_inputs[0][0] lowerCamelCase : List[Any] = self.model(**UpperCamelCase__ , **UpperCamelCase__ ) lowerCamelCase : Tuple = { "candidate_labels": candidate_labels, "logits": outputs.logits_per_image, } return model_outputs def _lowercase ( self , UpperCamelCase__ ) -> str: lowerCamelCase : Union[str, Any] = model_outputs.pop("candidate_labels" ) lowerCamelCase : Tuple = model_outputs["logits"][0] if self.framework == "pt": lowerCamelCase : Any = logits.softmax(dim=-1 ).squeeze(-1 ) lowerCamelCase : Optional[Any] = probs.tolist() if not isinstance(UpperCamelCase__ , UpperCamelCase__ ): lowerCamelCase : List[Any] = [scores] elif self.framework == "tf": lowerCamelCase : str = stable_softmax(UpperCamelCase__ , axis=-1 ) lowerCamelCase : str = probs.numpy().tolist() else: raise ValueError(F'''Unsupported framework: {self.framework}''' ) lowerCamelCase : Any = [ {"score": score, "label": candidate_label} for score, candidate_label in sorted(zip(UpperCamelCase__ , UpperCamelCase__ ) , key=lambda UpperCamelCase__ : -x[0] ) ] return result
48
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, is_vision_available, ) SCREAMING_SNAKE_CASE__ : List[Any] = {'processing_layoutxlm': ['LayoutXLMProcessor']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Optional[Any] = ['LayoutXLMTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Dict = ['LayoutXLMTokenizerFast'] if TYPE_CHECKING: from .processing_layoutxlm import LayoutXLMProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm import LayoutXLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast else: import sys SCREAMING_SNAKE_CASE__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
48
1
def A ( _SCREAMING_SNAKE_CASE ) -> int: if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) or number < 0: raise ValueError("Input must be a non-negative integer" ) lowerCamelCase : str = 0 while number: # This way we arrive at next set bit (next 1) instead of looping # through each bit and checking for 1s hence the # loop won't run 32 times it will only run the number of `1` times number &= number - 1 count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
48
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> list: lowerCamelCase : Dict = len(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Union[str, Any] = [] for i in range(len(_SCREAMING_SNAKE_CASE ) - pat_len + 1 ): lowerCamelCase : Dict = True for j in range(_SCREAMING_SNAKE_CASE ): if s[i + j] != pattern[j]: lowerCamelCase : Optional[int] = False break if match_found: position.append(_SCREAMING_SNAKE_CASE ) return position if __name__ == "__main__": assert naive_pattern_search('ABCDEFG', 'DE') == [3] print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
48
1
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class UpperCamelCase__ (lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase_ : Any = TextToVideoSDPipeline lowerCamelCase_ : Tuple = TEXT_TO_IMAGE_PARAMS lowerCamelCase_ : List[Any] = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. lowerCamelCase_ : Union[str, Any] = frozenset( [ """num_inference_steps""", """generator""", """latents""", """return_dict""", """callback""", """callback_steps""", ] ) def _lowercase ( self ) -> Optional[int]: torch.manual_seed(0 ) lowerCamelCase : str = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , ) lowerCamelCase : Optional[Any] = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , ) torch.manual_seed(0 ) lowerCamelCase : List[str] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) lowerCamelCase : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , ) lowerCamelCase : Any = CLIPTextModel(UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) lowerCamelCase : int = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__=0 ) -> Union[str, Any]: if str(UpperCamelCase__ ).startswith("mps" ): lowerCamelCase : List[Any] = torch.manual_seed(UpperCamelCase__ ) else: lowerCamelCase : Any = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "pt", } return inputs def _lowercase ( self ) -> Any: lowerCamelCase : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator lowerCamelCase : int = self.get_dummy_components() lowerCamelCase : List[str] = TextToVideoSDPipeline(**UpperCamelCase__ ) lowerCamelCase : Any = sd_pipe.to(UpperCamelCase__ ) sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowerCamelCase : Dict = self.get_dummy_inputs(UpperCamelCase__ ) lowerCamelCase : Dict = "np" lowerCamelCase : Optional[int] = sd_pipe(**UpperCamelCase__ ).frames lowerCamelCase : Dict = frames[0][-3:, -3:, -1] assert frames[0].shape == (64, 64, 3) lowerCamelCase : Optional[Any] = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _lowercase ( self ) -> Any: self._test_attention_slicing_forward_pass(test_mean_pixel_difference=UpperCamelCase__ , expected_max_diff=3e-3 ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def _lowercase ( self ) -> Optional[Any]: self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=UpperCamelCase__ , expected_max_diff=1e-2 ) @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." ) def _lowercase ( self ) -> Dict: pass @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." ) def _lowercase ( self ) -> Union[str, Any]: pass @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." ) def _lowercase ( self ) -> List[str]: pass def _lowercase ( self ) -> int: return super().test_progress_bar() @slow @skip_mps class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def _lowercase ( self ) -> str: lowerCamelCase : List[str] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" ) lowerCamelCase : Optional[Any] = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" ) lowerCamelCase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) lowerCamelCase : Tuple = pipe.to("cuda" ) lowerCamelCase : str = "Spiderman is surfing" lowerCamelCase : Dict = torch.Generator(device="cpu" ).manual_seed(0 ) lowerCamelCase : Tuple = pipe(UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=25 , output_type="pt" ).frames lowerCamelCase : List[str] = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2 def _lowercase ( self ) -> str: lowerCamelCase : Optional[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" ) lowerCamelCase : int = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" ) lowerCamelCase : List[Any] = pipe.to("cuda" ) lowerCamelCase : Optional[int] = "Spiderman is surfing" lowerCamelCase : Union[str, Any] = torch.Generator(device="cpu" ).manual_seed(0 ) lowerCamelCase : int = pipe(UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type="pt" ).frames lowerCamelCase : List[str] = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2
48
from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE__ : Optional[Any] = {'configuration_mmbt': ['MMBTConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : List[Any] = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings'] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys SCREAMING_SNAKE_CASE__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
48
1
import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : List[str] = """char""" lowerCamelCase_ : Union[str, Any] = """bpe""" lowerCamelCase_ : Optional[Any] = """wp""" SCREAMING_SNAKE_CASE__ : Tuple = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Optional[Any] = ["""image_processor""", """char_tokenizer"""] lowerCamelCase_ : Optional[int] = """ViTImageProcessor""" lowerCamelCase_ : List[Any] = """MgpstrTokenizer""" def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> Optional[int]: lowerCamelCase : int = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , UpperCamelCase__ , ) lowerCamelCase : List[str] = kwargs.pop("feature_extractor" ) lowerCamelCase : List[Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) lowerCamelCase : Any = tokenizer lowerCamelCase : str = AutoTokenizer.from_pretrained("gpt2" ) lowerCamelCase : Tuple = AutoTokenizer.from_pretrained("bert-base-uncased" ) super().__init__(UpperCamelCase__ , UpperCamelCase__ ) def __call__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> Optional[int]: if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process." ) if images is not None: lowerCamelCase : Optional[int] = self.image_processor(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ ) if text is not None: lowerCamelCase : Tuple = self.char_tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ ) if text is None: return inputs elif images is None: return encodings else: lowerCamelCase : Dict = encodings["input_ids"] return inputs def _lowercase ( self , UpperCamelCase__ ) -> str: lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[Any] = sequences lowerCamelCase : Any = char_preds.size(0 ) lowerCamelCase , lowerCamelCase : Any = self._decode_helper(UpperCamelCase__ , "char" ) lowerCamelCase , lowerCamelCase : List[Any] = self._decode_helper(UpperCamelCase__ , "bpe" ) lowerCamelCase , lowerCamelCase : Tuple = self._decode_helper(UpperCamelCase__ , "wp" ) lowerCamelCase : Union[str, Any] = [] lowerCamelCase : str = [] for i in range(UpperCamelCase__ ): lowerCamelCase : List[str] = [char_scores[i], bpe_scores[i], wp_scores[i]] lowerCamelCase : str = [char_strs[i], bpe_strs[i], wp_strs[i]] lowerCamelCase : Optional[int] = scores.index(max(UpperCamelCase__ ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) lowerCamelCase : Optional[Any] = {} lowerCamelCase : List[str] = final_strs lowerCamelCase : str = final_scores lowerCamelCase : Optional[int] = char_strs lowerCamelCase : Union[str, Any] = bpe_strs lowerCamelCase : Tuple = wp_strs return out def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Dict: if format == DecodeType.CHARACTER: lowerCamelCase : Union[str, Any] = self.char_decode lowerCamelCase : List[Any] = 1 lowerCamelCase : Union[str, Any] = "[s]" elif format == DecodeType.BPE: lowerCamelCase : Dict = self.bpe_decode lowerCamelCase : List[str] = 2 lowerCamelCase : List[Any] = "#" elif format == DecodeType.WORDPIECE: lowerCamelCase : Any = self.wp_decode lowerCamelCase : List[str] = 102 lowerCamelCase : Any = "[SEP]" else: raise ValueError(F'''Format {format} is not supported.''' ) lowerCamelCase , lowerCamelCase : List[str] = [], [] lowerCamelCase : List[Any] = pred_logits.size(0 ) lowerCamelCase : Union[str, Any] = pred_logits.size(1 ) lowerCamelCase , lowerCamelCase : Optional[int] = pred_logits.topk(1 , dim=-1 , largest=UpperCamelCase__ , sorted=UpperCamelCase__ ) lowerCamelCase : Tuple = preds_index.view(-1 , UpperCamelCase__ )[:, 1:] lowerCamelCase : str = decoder(UpperCamelCase__ ) lowerCamelCase , lowerCamelCase : str = torch.nn.functional.softmax(UpperCamelCase__ , dim=2 ).max(dim=2 ) lowerCamelCase : Optional[int] = preds_max_prob[:, 1:] for index in range(UpperCamelCase__ ): lowerCamelCase : List[str] = preds_str[index].find(UpperCamelCase__ ) lowerCamelCase : int = preds_str[index][:pred_eos] lowerCamelCase : Optional[Any] = preds_index[index].cpu().tolist() lowerCamelCase : List[Any] = pred_index.index(UpperCamelCase__ ) if eos_token in pred_index else -1 lowerCamelCase : Optional[int] = preds_max_prob[index][: pred_eos_index + 1] lowerCamelCase : Union[str, Any] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(UpperCamelCase__ ) conf_scores.append(UpperCamelCase__ ) return dec_strs, conf_scores def _lowercase ( self , UpperCamelCase__ ) -> Tuple: lowerCamelCase : Optional[int] = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(UpperCamelCase__ )] return decode_strs def _lowercase ( self , UpperCamelCase__ ) -> str: return self.bpe_tokenizer.batch_decode(UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ ) -> List[Any]: lowerCamelCase : Tuple = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(UpperCamelCase__ )] return decode_strs
48
import numpy as np from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor def A ( _SCREAMING_SNAKE_CASE ) -> tuple: return (data["data"], data["target"]) def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> np.ndarray: lowerCamelCase : List[str] = XGBRegressor(verbosity=0 ,random_state=42 ) xgb.fit(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # Predict target for test data lowerCamelCase : List[Any] = xgb.predict(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Tuple = predictions.reshape(len(_SCREAMING_SNAKE_CASE ) ,1 ) return predictions def A ( ) -> None: lowerCamelCase : Dict = fetch_california_housing() lowerCamelCase , lowerCamelCase : Tuple = data_handling(_SCREAMING_SNAKE_CASE ) lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[Any] = train_test_split( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,test_size=0.25 ,random_state=1 ) lowerCamelCase : Any = xgboost(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # Error printing print(f'''Mean Absolute Error : {mean_absolute_error(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )}''' ) print(f'''Mean Square Error : {mean_squared_error(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )}''' ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
48
1
from __future__ import annotations def A ( _SCREAMING_SNAKE_CASE ) -> int: # preprocessing the first row for i in range(1 ,len(matrix[0] ) ): matrix[0][i] += matrix[0][i - 1] # preprocessing the first column for i in range(1 ,len(_SCREAMING_SNAKE_CASE ) ): matrix[i][0] += matrix[i - 1][0] # updating the path cost for current position for i in range(1 ,len(_SCREAMING_SNAKE_CASE ) ): for j in range(1 ,len(matrix[0] ) ): matrix[i][j] += min(matrix[i - 1][j] ,matrix[i][j - 1] ) return matrix[-1][-1] if __name__ == "__main__": import doctest doctest.testmod()
48
from math import sqrt def A ( _SCREAMING_SNAKE_CASE = 100_0000 ) -> int: lowerCamelCase : int = 0 lowerCamelCase : int = 0 lowerCamelCase : int while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 ,2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(_SCREAMING_SNAKE_CASE ,sum_shortest_sides // 2 ) - max(1 ,sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(f'''{solution() = }''')
48
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available SCREAMING_SNAKE_CASE__ : Optional[Any] = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Optional[int] = ['BartphoTokenizer'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys SCREAMING_SNAKE_CASE__ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
48
import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors SCREAMING_SNAKE_CASE__ : Dict = logging.getLogger(__name__) class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Optional[int] = """sequence-classification""" def __init__( self , UpperCamelCase__ ) -> List[Any]: if type(UpperCamelCase__ ) == dict: lowerCamelCase : int = Namespace(**UpperCamelCase__ ) lowerCamelCase : str = glue_output_modes[hparams.task] lowerCamelCase : int = glue_tasks_num_labels[hparams.task] super().__init__(UpperCamelCase__ , UpperCamelCase__ , self.mode ) def _lowercase ( self , **UpperCamelCase__ ) -> Tuple: return self.model(**UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple: lowerCamelCase : Union[str, Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: lowerCamelCase : List[str] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None lowerCamelCase : Optional[int] = self(**UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = outputs[0] lowerCamelCase : str = self.trainer.lr_schedulers[0]["scheduler"] lowerCamelCase : Optional[int] = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def _lowercase ( self ) -> str: lowerCamelCase : Any = self.hparams lowerCamelCase : Union[str, Any] = processors[args.task]() lowerCamelCase : Optional[int] = processor.get_labels() for mode in ["train", "dev"]: lowerCamelCase : Optional[Any] = self._feature_file(UpperCamelCase__ ) if os.path.exists(UpperCamelCase__ ) and not args.overwrite_cache: logger.info("Loading features from cached file %s" , UpperCamelCase__ ) else: logger.info("Creating features from dataset file at %s" , args.data_dir ) lowerCamelCase : List[str] = ( processor.get_dev_examples(args.data_dir ) if mode == "dev" else processor.get_train_examples(args.data_dir ) ) lowerCamelCase : Dict = convert_examples_to_features( UpperCamelCase__ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info("Saving features into cached file %s" , UpperCamelCase__ ) torch.save(UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False ) -> DataLoader: lowerCamelCase : str = "dev" if mode == "test" else mode lowerCamelCase : int = self._feature_file(UpperCamelCase__ ) logger.info("Loading features from cached file %s" , UpperCamelCase__ ) lowerCamelCase : str = torch.load(UpperCamelCase__ ) lowerCamelCase : List[str] = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) lowerCamelCase : str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) lowerCamelCase : List[str] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) if self.hparams.glue_output_mode == "classification": lowerCamelCase : Any = torch.tensor([f.label for f in features] , dtype=torch.long ) elif self.hparams.glue_output_mode == "regression": lowerCamelCase : Union[str, Any] = torch.tensor([f.label for f in features] , dtype=torch.float ) return DataLoader( TensorDataset(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , batch_size=UpperCamelCase__ , shuffle=UpperCamelCase__ , ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]: lowerCamelCase : Dict = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: lowerCamelCase : Tuple = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None lowerCamelCase : Dict = self(**UpperCamelCase__ ) lowerCamelCase , lowerCamelCase : Any = outputs[:2] lowerCamelCase : Union[str, Any] = logits.detach().cpu().numpy() lowerCamelCase : Optional[Any] = inputs["labels"].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def _lowercase ( self , UpperCamelCase__ ) -> tuple: lowerCamelCase : Union[str, Any] = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item() lowerCamelCase : Optional[int] = np.concatenate([x["pred"] for x in outputs] , axis=0 ) if self.hparams.glue_output_mode == "classification": lowerCamelCase : Union[str, Any] = np.argmax(UpperCamelCase__ , axis=1 ) elif self.hparams.glue_output_mode == "regression": lowerCamelCase : str = np.squeeze(UpperCamelCase__ ) lowerCamelCase : List[Any] = np.concatenate([x["target"] for x in outputs] , axis=0 ) lowerCamelCase : List[str] = [[] for _ in range(out_label_ids.shape[0] )] lowerCamelCase : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )] lowerCamelCase : Dict = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , UpperCamelCase__ , UpperCamelCase__ )} lowerCamelCase : List[str] = dict(results.items() ) lowerCamelCase : Optional[int] = results return ret, preds_list, out_label_list def _lowercase ( self , UpperCamelCase__ ) -> dict: lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = self._eval_end(UpperCamelCase__ ) lowerCamelCase : str = ret["log"] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def _lowercase ( self , UpperCamelCase__ ) -> dict: lowerCamelCase , lowerCamelCase , lowerCamelCase : str = self._eval_end(UpperCamelCase__ ) lowerCamelCase : str = ret["log"] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def _lowercase ( UpperCamelCase__ , UpperCamelCase__ ) -> int: BaseTransformer.add_model_specific_args(UpperCamelCase__ , UpperCamelCase__ ) parser.add_argument( "--max_seq_length" , default=128 , type=UpperCamelCase__ , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) , ) parser.add_argument( "--task" , default="" , type=UpperCamelCase__ , required=UpperCamelCase__ , help="The GLUE task to run" , ) parser.add_argument( "--gpus" , default=0 , type=UpperCamelCase__ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , ) parser.add_argument( "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" ) return parser def A ( ) -> int: lowerCamelCase : int = argparse.ArgumentParser() add_generic_args(_SCREAMING_SNAKE_CASE ,os.getcwd() ) lowerCamelCase : str = GLUETransformer.add_model_specific_args(_SCREAMING_SNAKE_CASE ,os.getcwd() ) lowerCamelCase : str = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: lowerCamelCase : int = os.path.join( "./results" ,f'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' ,) os.makedirs(args.output_dir ) lowerCamelCase : int = GLUETransformer(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Dict = generic_train(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # Optionally, predict on dev set and write to output_dir if args.do_predict: lowerCamelCase : Optional[int] = sorted(glob.glob(os.path.join(args.output_dir ,"checkpoint-epoch=*.ckpt" ) ,recursive=_SCREAMING_SNAKE_CASE ) ) lowerCamelCase : Tuple = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
48
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Union[str, Any] = { 'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json', 'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json', 'kssteven/ibert-roberta-large-mnli': ( 'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json' ), } class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Optional[Any] = """ibert""" def __init__( self , UpperCamelCase__=3_0522 , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=2 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-12 , UpperCamelCase__=1 , UpperCamelCase__=0 , UpperCamelCase__=2 , UpperCamelCase__="absolute" , UpperCamelCase__=False , UpperCamelCase__="none" , **UpperCamelCase__ , ) -> Union[str, Any]: super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ ) lowerCamelCase : Optional[Any] = vocab_size lowerCamelCase : str = hidden_size lowerCamelCase : Any = num_hidden_layers lowerCamelCase : Any = num_attention_heads lowerCamelCase : Union[str, Any] = hidden_act lowerCamelCase : Optional[int] = intermediate_size lowerCamelCase : Union[str, Any] = hidden_dropout_prob lowerCamelCase : List[str] = attention_probs_dropout_prob lowerCamelCase : Dict = max_position_embeddings lowerCamelCase : Tuple = type_vocab_size lowerCamelCase : int = initializer_range lowerCamelCase : str = layer_norm_eps lowerCamelCase : List[str] = position_embedding_type lowerCamelCase : Union[str, Any] = quant_mode lowerCamelCase : Tuple = force_dequant class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' @property def _lowercase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": lowerCamelCase : Optional[Any] = {0: "batch", 1: "choice", 2: "sequence"} else: lowerCamelCase : Union[str, Any] = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
48
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Any: # "extended trapezoidal rule" # int(f) = dx/2 * (f1 + 2f2 + ... + fn) lowerCamelCase : str = (boundary[1] - boundary[0]) / steps lowerCamelCase : List[str] = boundary[0] lowerCamelCase : Union[str, Any] = boundary[1] lowerCamelCase : int = make_points(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) lowerCamelCase : List[str] = 0.0 y += (h / 2.0) * f(_SCREAMING_SNAKE_CASE ) for i in x_i: # print(i) y += h * f(_SCREAMING_SNAKE_CASE ) y += (h / 2.0) * f(_SCREAMING_SNAKE_CASE ) return y def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> int: lowerCamelCase : int = a + h while x < (b - h): yield x lowerCamelCase : List[str] = x + h def A ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]: # enter your function here lowerCamelCase : str = (x - 0) * (x - 0) return y def A ( ) -> int: lowerCamelCase : int = 0.0 # Lower bound of integration lowerCamelCase : int = 1.0 # Upper bound of integration lowerCamelCase : Dict = 10.0 # define number of steps or resolution lowerCamelCase : int = [a, b] # define boundary of integration lowerCamelCase : str = method_a(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) print(f'''y = {y}''' ) if __name__ == "__main__": main()
48
1
import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.17.0.dev0') require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt') SCREAMING_SNAKE_CASE__ : List[str] = logging.getLogger(__name__) @dataclass class UpperCamelCase__ : '''simple docstring''' lowerCamelCase_ : Optional[str] = field( default="""tab_fact""" , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} ) lowerCamelCase_ : Optional[str] = field( default="""tab_fact""" , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} , ) lowerCamelCase_ : int = field( default=1_0_2_4 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) lowerCamelCase_ : bool = field( default=lowerCAmelCase__ , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} ) lowerCamelCase_ : bool = field( default=lowerCAmelCase__ , metadata={ """help""": ( """Whether to pad all samples to `max_seq_length`. """ """If False, will pad the samples dynamically when batching to the maximum length in the batch.""" ) } , ) lowerCamelCase_ : Optional[int] = field( default=lowerCAmelCase__ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) lowerCamelCase_ : Optional[int] = field( default=lowerCAmelCase__ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) lowerCamelCase_ : Optional[int] = field( default=lowerCAmelCase__ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of prediction examples to this """ """value if set.""" ) } , ) lowerCamelCase_ : Optional[str] = field( default=lowerCAmelCase__ , metadata={"""help""": """A csv or a json file containing the training data."""} ) lowerCamelCase_ : Optional[str] = field( default=lowerCAmelCase__ , metadata={"""help""": """A csv or a json file containing the validation data."""} ) lowerCamelCase_ : Optional[str] = field(default=lowerCAmelCase__ , metadata={"""help""": """A csv or a json file containing the test data."""} ) def _lowercase ( self ) -> str: if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError("Need either a GLUE task, a training/validation file or a dataset name." ) else: lowerCamelCase : str = self.train_file.split("." )[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." lowerCamelCase : int = self.validation_file.split("." )[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." @dataclass class UpperCamelCase__ : '''simple docstring''' lowerCamelCase_ : str = field( default=lowerCAmelCase__ , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) lowerCamelCase_ : Optional[str] = field( default=lowerCAmelCase__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) lowerCamelCase_ : Optional[str] = field( default=lowerCAmelCase__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) lowerCamelCase_ : Optional[str] = field( default=lowerCAmelCase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) lowerCamelCase_ : bool = field( default=lowerCAmelCase__ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , ) lowerCamelCase_ : str = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) lowerCamelCase_ : bool = field( default=lowerCAmelCase__ , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) def A ( ) -> int: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowerCamelCase : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowerCamelCase , lowerCamelCase , lowerCamelCase : int = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ,datefmt="%m/%d/%Y %H:%M:%S" ,handlers=[logging.StreamHandler(sys.stdout )] ,) lowerCamelCase : Union[str, Any] = training_args.get_process_log_level() logger.setLevel(_SCREAMING_SNAKE_CASE ) datasets.utils.logging.set_verbosity(_SCREAMING_SNAKE_CASE ) transformers.utils.logging.set_verbosity(_SCREAMING_SNAKE_CASE ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(f'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. lowerCamelCase : Tuple = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: lowerCamelCase : Tuple = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. lowerCamelCase : str = load_dataset( data_args.dataset_name ,data_args.dataset_config_name ,cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. lowerCamelCase : Dict = {"train": data_args.train_file, "validation": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: lowerCamelCase : List[str] = data_args.train_file.split("." )[-1] lowerCamelCase : Optional[int] = data_args.test_file.split("." )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." lowerCamelCase : int = data_args.test_file else: raise ValueError("Need either a GLUE task or a test file for `do_predict`." ) for key in data_files.keys(): logger.info(f'''load a local file for {key}: {data_files[key]}''' ) if data_args.train_file.endswith(".csv" ): # Loading a dataset from local csv files lowerCamelCase : Dict = load_dataset("csv" ,data_files=_SCREAMING_SNAKE_CASE ,cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files lowerCamelCase : List[Any] = load_dataset("json" ,data_files=_SCREAMING_SNAKE_CASE ,cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels lowerCamelCase : int = raw_datasets["train"].features["label"].names lowerCamelCase : Optional[int] = len(_SCREAMING_SNAKE_CASE ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. lowerCamelCase : str = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=_SCREAMING_SNAKE_CASE ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,) # load tapex tokenizer lowerCamelCase : str = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast_tokenizer ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,add_prefix_space=_SCREAMING_SNAKE_CASE ,) lowerCamelCase : Tuple = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path ,from_tf=bool(".ckpt" in model_args.model_name_or_path ) ,config=_SCREAMING_SNAKE_CASE ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,) # Padding strategy if data_args.pad_to_max_length: lowerCamelCase : Optional[int] = "max_length" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch lowerCamelCase : Tuple = False # Some models have set the order of the labels to use, so let's make sure we do use it. lowerCamelCase : List[Any] = {"Refused": 0, "Entailed": 1} lowerCamelCase : Optional[int] = {0: "Refused", 1: "Entailed"} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the''' f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' ) lowerCamelCase : Tuple = min(data_args.max_seq_length ,tokenizer.model_max_length ) def preprocess_tabfact_function(_SCREAMING_SNAKE_CASE ): # Tokenize the texts def _convert_table_text_to_pandas(_SCREAMING_SNAKE_CASE ): lowerCamelCase : int = [_table_row.split("#" ) for _table_row in _table_text.strip("\n" ).split("\n" )] lowerCamelCase : List[Any] = pd.DataFrame.from_records(_table_content[1:] ,columns=_table_content[0] ) return _table_pd lowerCamelCase : Tuple = examples["statement"] lowerCamelCase : Dict = list(map(_convert_table_text_to_pandas ,examples["table_text"] ) ) lowerCamelCase : Optional[Any] = tokenizer(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE ) lowerCamelCase : Union[str, Any] = examples["label"] return result with training_args.main_process_first(desc="dataset map pre-processing" ): lowerCamelCase : Any = raw_datasets.map( _SCREAMING_SNAKE_CASE ,batched=_SCREAMING_SNAKE_CASE ,load_from_cache_file=not data_args.overwrite_cache ,desc="Running tokenizer on dataset" ,) if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset" ) lowerCamelCase : List[str] = raw_datasets["train"] if data_args.max_train_samples is not None: lowerCamelCase : List[Any] = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset" ) lowerCamelCase : Any = raw_datasets["validation"] if data_args.max_eval_samples is not None: lowerCamelCase : Union[str, Any] = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError("--do_predict requires a test dataset" ) lowerCamelCase : int = raw_datasets["test"] if data_args.max_predict_samples is not None: lowerCamelCase : Dict = predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(_SCREAMING_SNAKE_CASE ) ) ,3 ): logger.info(f'''Sample {index} of the training set: {train_dataset[index]}.''' ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(_SCREAMING_SNAKE_CASE ): lowerCamelCase : Union[str, Any] = p.predictions[0] if isinstance(p.predictions ,_SCREAMING_SNAKE_CASE ) else p.predictions lowerCamelCase : int = np.argmax(_SCREAMING_SNAKE_CASE ,axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: lowerCamelCase : List[str] = default_data_collator elif training_args.fpaa: lowerCamelCase : str = DataCollatorWithPadding(_SCREAMING_SNAKE_CASE ,pad_to_multiple_of=8 ) else: lowerCamelCase : Optional[int] = None # Initialize our Trainer lowerCamelCase : Dict = Trainer( model=_SCREAMING_SNAKE_CASE ,args=_SCREAMING_SNAKE_CASE ,train_dataset=train_dataset if training_args.do_train else None ,eval_dataset=eval_dataset if training_args.do_eval else None ,compute_metrics=_SCREAMING_SNAKE_CASE ,tokenizer=_SCREAMING_SNAKE_CASE ,data_collator=_SCREAMING_SNAKE_CASE ,) # Training if training_args.do_train: lowerCamelCase : Optional[Any] = None if training_args.resume_from_checkpoint is not None: lowerCamelCase : Any = training_args.resume_from_checkpoint elif last_checkpoint is not None: lowerCamelCase : Union[str, Any] = last_checkpoint lowerCamelCase : Dict = trainer.train(resume_from_checkpoint=_SCREAMING_SNAKE_CASE ) lowerCamelCase : List[str] = train_result.metrics lowerCamelCase : Optional[Any] = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(_SCREAMING_SNAKE_CASE ) ) lowerCamelCase : Union[str, Any] = min(_SCREAMING_SNAKE_CASE ,len(_SCREAMING_SNAKE_CASE ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("train" ,_SCREAMING_SNAKE_CASE ) trainer.save_metrics("train" ,_SCREAMING_SNAKE_CASE ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) lowerCamelCase : List[Any] = trainer.evaluate(eval_dataset=_SCREAMING_SNAKE_CASE ) lowerCamelCase : Any = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_SCREAMING_SNAKE_CASE ) lowerCamelCase : List[str] = min(_SCREAMING_SNAKE_CASE ,len(_SCREAMING_SNAKE_CASE ) ) trainer.log_metrics("eval" ,_SCREAMING_SNAKE_CASE ) trainer.save_metrics("eval" ,_SCREAMING_SNAKE_CASE ) if training_args.do_predict: logger.info("*** Predict ***" ) # Removing the `label` columns because it contains -1 and Trainer won't like that. lowerCamelCase : Dict = predict_dataset.remove_columns("label" ) lowerCamelCase : Optional[int] = trainer.predict(_SCREAMING_SNAKE_CASE ,metric_key_prefix="predict" ).predictions lowerCamelCase : Tuple = np.argmax(_SCREAMING_SNAKE_CASE ,axis=1 ) lowerCamelCase : Dict = os.path.join(training_args.output_dir ,"predict_results_tabfact.txt" ) if trainer.is_world_process_zero(): with open(_SCREAMING_SNAKE_CASE ,"w" ) as writer: logger.info("***** Predict Results *****" ) writer.write("index\tprediction\n" ) for index, item in enumerate(_SCREAMING_SNAKE_CASE ): lowerCamelCase : Union[str, Any] = label_list[item] writer.write(f'''{index}\t{item}\n''' ) lowerCamelCase : Union[str, Any] = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"} if training_args.push_to_hub: trainer.push_to_hub(**_SCREAMING_SNAKE_CASE ) else: trainer.create_model_card(**_SCREAMING_SNAKE_CASE ) def A ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
48
def A ( _SCREAMING_SNAKE_CASE = 100_0000 ) -> int: lowerCamelCase : Tuple = 1 lowerCamelCase : int = 1 lowerCamelCase : Optional[Any] = {1: 1} for inputa in range(2 ,_SCREAMING_SNAKE_CASE ): lowerCamelCase : Union[str, Any] = 0 lowerCamelCase : List[str] = inputa while True: if number in counters: counter += counters[number] break if number % 2 == 0: number //= 2 counter += 1 else: lowerCamelCase : str = (3 * number) + 1 counter += 1 if inputa not in counters: lowerCamelCase : str = counter if counter > pre_counter: lowerCamelCase : str = inputa lowerCamelCase : Any = counter return largest_number if __name__ == "__main__": print(solution(int(input().strip())))
48
1
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__) class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Any = """encoder-decoder""" lowerCamelCase_ : Optional[int] = True def __init__( self , **UpperCamelCase__ ) -> Union[str, Any]: super().__init__(**UpperCamelCase__ ) assert ( "encoder" in kwargs and "decoder" in kwargs ), "Config has to be initialized with encoder and decoder config" lowerCamelCase : List[str] = kwargs.pop("encoder" ) lowerCamelCase : int = encoder_config.pop("model_type" ) lowerCamelCase : Union[str, Any] = kwargs.pop("decoder" ) lowerCamelCase : str = decoder_config.pop("model_type" ) from ..auto.configuration_auto import AutoConfig lowerCamelCase : int = AutoConfig.for_model(UpperCamelCase__ , **UpperCamelCase__ ) lowerCamelCase : Dict = AutoConfig.for_model(UpperCamelCase__ , **UpperCamelCase__ ) lowerCamelCase : Tuple = True @classmethod def _lowercase ( cls , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) -> PretrainedConfig: logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config" ) lowerCamelCase : List[Any] = True lowerCamelCase : List[str] = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **UpperCamelCase__ ) def _lowercase ( self ) -> List[Any]: lowerCamelCase : Optional[int] = copy.deepcopy(self.__dict__ ) lowerCamelCase : Dict = self.encoder.to_dict() lowerCamelCase : Dict = self.decoder.to_dict() lowerCamelCase : Any = self.__class__.model_type return output
48
import argparse import os import re SCREAMING_SNAKE_CASE__ : List[Any] = 'src/transformers/models/auto' # re pattern that matches mapping introductions: # SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict SCREAMING_SNAKE_CASE__ : Optional[int] = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict') # re pattern that matches identifiers in mappings SCREAMING_SNAKE_CASE__ : Tuple = re.compile(r'\s*\(\s*"(\S[^"]+)"') def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = False ) -> int: with open(_SCREAMING_SNAKE_CASE ,"r" ,encoding="utf-8" ) as f: lowerCamelCase : List[Any] = f.read() lowerCamelCase : str = content.split("\n" ) lowerCamelCase : int = [] lowerCamelCase : List[Any] = 0 while line_idx < len(_SCREAMING_SNAKE_CASE ): if _re_intro_mapping.search(lines[line_idx] ) is not None: lowerCamelCase : Optional[int] = len(re.search(r"^(\s*)\S" ,lines[line_idx] ).groups()[0] ) + 8 # Start of a new mapping! while not lines[line_idx].startswith(" " * indent + "(" ): new_lines.append(lines[line_idx] ) line_idx += 1 lowerCamelCase : Optional[int] = [] while lines[line_idx].strip() != "]": # Blocks either fit in one line or not if lines[line_idx].strip() == "(": lowerCamelCase : List[str] = line_idx while not lines[line_idx].startswith(" " * indent + ")" ): line_idx += 1 blocks.append("\n".join(lines[start_idx : line_idx + 1] ) ) else: blocks.append(lines[line_idx] ) line_idx += 1 # Sort blocks by their identifiers lowerCamelCase : Union[str, Any] = sorted(_SCREAMING_SNAKE_CASE ,key=lambda _SCREAMING_SNAKE_CASE : _re_identifier.search(_SCREAMING_SNAKE_CASE ).groups()[0] ) new_lines += blocks else: new_lines.append(lines[line_idx] ) line_idx += 1 if overwrite: with open(_SCREAMING_SNAKE_CASE ,"w" ,encoding="utf-8" ) as f: f.write("\n".join(_SCREAMING_SNAKE_CASE ) ) elif "\n".join(_SCREAMING_SNAKE_CASE ) != content: return True def A ( _SCREAMING_SNAKE_CASE = False ) -> List[str]: lowerCamelCase : str = [os.path.join(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) for f in os.listdir(_SCREAMING_SNAKE_CASE ) if f.endswith(".py" )] lowerCamelCase : Union[str, Any] = [sort_auto_mapping(_SCREAMING_SNAKE_CASE ,overwrite=_SCREAMING_SNAKE_CASE ) for fname in fnames] if not overwrite and any(_SCREAMING_SNAKE_CASE ): lowerCamelCase : str = [f for f, d in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) if d] raise ValueError( f'''The following files have auto mappings that need sorting: {", ".join(_SCREAMING_SNAKE_CASE )}. Run `make style` to fix''' " this." ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : List[str] = argparse.ArgumentParser() parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.') SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args() sort_all_auto_mappings(not args.check_only)
48
1
# Note: if you intend to run this script make sure you look under scripts/fsmt/ # to locate the appropriate script to do the work correctly. There is a set of scripts to: # - download and prepare data and run the conversion script # - perform eval to get the best hparam into the config # - generate model_cards - useful if you have multiple models from the same paper import argparse import json import os import re from collections import OrderedDict from os.path import basename, dirname import fairseq import torch from fairseq import hub_utils from fairseq.data.dictionary import Dictionary from transformers import FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() SCREAMING_SNAKE_CASE__ : Optional[Any] = 2 # based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping` # values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults: # # * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users) # * `early_stopping`: `False` consistently scored better # * `length_penalty` varied, so will assign the best one depending on the model SCREAMING_SNAKE_CASE__ : List[str] = { # fairseq: 'wmt19-ru-en': {'length_penalty': 1.1}, 'wmt19-en-ru': {'length_penalty': 1.15}, 'wmt19-en-de': {'length_penalty': 1.0}, 'wmt19-de-en': {'length_penalty': 1.1}, # allenai: 'wmt16-en-de-dist-12-1': {'length_penalty': 0.6}, 'wmt16-en-de-dist-6-1': {'length_penalty': 0.6}, 'wmt16-en-de-12-1': {'length_penalty': 0.8}, 'wmt19-de-en-6-6-base': {'length_penalty': 0.6}, 'wmt19-de-en-6-6-big': {'length_penalty': 0.6}, } # this remaps the different models to their organization names SCREAMING_SNAKE_CASE__ : Optional[Any] = {} for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: SCREAMING_SNAKE_CASE__ : str = 'facebook' for m in [ "wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1", "wmt19-de-en-6-6-base", "wmt19-de-en-6-6-big", ]: SCREAMING_SNAKE_CASE__ : Dict = 'allenai' def A ( _SCREAMING_SNAKE_CASE ) -> Any: # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up, # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7} lowerCamelCase : Optional[Any] = dict((re.sub(r"@@$" ,"" ,_SCREAMING_SNAKE_CASE ), v) if k.endswith("@@" ) else (re.sub(r"$" ,"</w>" ,_SCREAMING_SNAKE_CASE ), v) for k, v in d.items() ) lowerCamelCase : Optional[Any] = "<s> <pad> </s> <unk>".split() # restore the special tokens for k in keep_keys: del da[f'''{k}</w>'''] lowerCamelCase : Union[str, Any] = d[k] # restore return da def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Any: # prep assert os.path.exists(_SCREAMING_SNAKE_CASE ) os.makedirs(_SCREAMING_SNAKE_CASE ,exist_ok=_SCREAMING_SNAKE_CASE ) print(f'''Writing results to {pytorch_dump_folder_path}''' ) # handle various types of models lowerCamelCase : Dict = basename(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Any = dirname(_SCREAMING_SNAKE_CASE ) lowerCamelCase : str = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel lowerCamelCase : List[str] = cls.hub_models() lowerCamelCase : Dict = {"bpe": "fastbpe", "tokenizer": "moses"} lowerCamelCase : Optional[int] = "." # note: since the model dump is old, fairseq has upgraded its model some # time later, and it does a whole lot of rewrites and splits on the saved # weights, therefore we can't use torch.load() directly on the model file. # see: upgrade_state_dict(state_dict) in fairseq_model.py print(f'''using checkpoint {checkpoint_file}''' ) lowerCamelCase : Any = hub_utils.from_pretrained( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,archive_map=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) lowerCamelCase : List[Any] = vars(chkpt["args"]["model"] ) lowerCamelCase : int = args["source_lang"] lowerCamelCase : Union[str, Any] = args["target_lang"] lowerCamelCase : Dict = dirname(_SCREAMING_SNAKE_CASE ) lowerCamelCase : int = basename(_SCREAMING_SNAKE_CASE ) # dicts lowerCamelCase : Optional[Any] = os.path.join(_SCREAMING_SNAKE_CASE ,f'''dict.{src_lang}.txt''' ) lowerCamelCase : Optional[int] = os.path.join(_SCREAMING_SNAKE_CASE ,f'''dict.{tgt_lang}.txt''' ) lowerCamelCase : Dict = Dictionary.load(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Tuple = rewrite_dict_keys(src_dict.indices ) lowerCamelCase : Any = len(_SCREAMING_SNAKE_CASE ) lowerCamelCase : List[Any] = os.path.join(_SCREAMING_SNAKE_CASE ,"vocab-src.json" ) print(f'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''' ) with open(_SCREAMING_SNAKE_CASE ,"w" ,encoding="utf-8" ) as f: f.write(json.dumps(_SCREAMING_SNAKE_CASE ,ensure_ascii=_SCREAMING_SNAKE_CASE ,indent=_SCREAMING_SNAKE_CASE ) ) # detect whether this is a do_lower_case situation, which can be derived by checking whether we # have at least one uppercase letter in the source vocab lowerCamelCase : Optional[int] = True for k in src_vocab.keys(): if not k.islower(): lowerCamelCase : Optional[int] = False break lowerCamelCase : int = Dictionary.load(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Tuple = rewrite_dict_keys(tgt_dict.indices ) lowerCamelCase : Dict = len(_SCREAMING_SNAKE_CASE ) lowerCamelCase : List[Any] = os.path.join(_SCREAMING_SNAKE_CASE ,"vocab-tgt.json" ) print(f'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''' ) with open(_SCREAMING_SNAKE_CASE ,"w" ,encoding="utf-8" ) as f: f.write(json.dumps(_SCREAMING_SNAKE_CASE ,ensure_ascii=_SCREAMING_SNAKE_CASE ,indent=_SCREAMING_SNAKE_CASE ) ) # merges_file (bpecodes) lowerCamelCase : List[str] = os.path.join(_SCREAMING_SNAKE_CASE ,VOCAB_FILES_NAMES["merges_file"] ) for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code" lowerCamelCase : Optional[Any] = os.path.join(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) if os.path.exists(_SCREAMING_SNAKE_CASE ): break with open(_SCREAMING_SNAKE_CASE ,encoding="utf-8" ) as fin: lowerCamelCase : List[str] = fin.read() lowerCamelCase : Optional[Any] = re.sub(r" \d+$" ,"" ,_SCREAMING_SNAKE_CASE ,0 ,re.M ) # remove frequency number print(f'''Generating {merges_file}''' ) with open(_SCREAMING_SNAKE_CASE ,"w" ,encoding="utf-8" ) as fout: fout.write(_SCREAMING_SNAKE_CASE ) # model config lowerCamelCase : Any = os.path.join(_SCREAMING_SNAKE_CASE ,"config.json" ) # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe - # may have to modify the tokenizer if a different type is used by a future model assert args["bpe"] == "fastbpe", f'''need to extend tokenizer to support bpe={args["bpe"]}''' assert args["tokenizer"] == "moses", f'''need to extend tokenizer to support bpe={args["tokenizer"]}''' lowerCamelCase : Tuple = { "architectures": ["FSMTForConditionalGeneration"], "model_type": "fsmt", "activation_dropout": args["activation_dropout"], "activation_function": "relu", "attention_dropout": args["attention_dropout"], "d_model": args["decoder_embed_dim"], "dropout": args["dropout"], "init_std": 0.02, "max_position_embeddings": args["max_source_positions"], "num_hidden_layers": args["encoder_layers"], "src_vocab_size": src_vocab_size, "tgt_vocab_size": tgt_vocab_size, "langs": [src_lang, tgt_lang], "encoder_attention_heads": args["encoder_attention_heads"], "encoder_ffn_dim": args["encoder_ffn_embed_dim"], "encoder_layerdrop": args["encoder_layerdrop"], "encoder_layers": args["encoder_layers"], "decoder_attention_heads": args["decoder_attention_heads"], "decoder_ffn_dim": args["decoder_ffn_embed_dim"], "decoder_layerdrop": args["decoder_layerdrop"], "decoder_layers": args["decoder_layers"], "bos_token_id": 0, "pad_token_id": 1, "eos_token_id": 2, "is_encoder_decoder": True, "scale_embedding": not args["no_scale_embedding"], "tie_word_embeddings": args["share_all_embeddings"], } # good hparam defaults to start with lowerCamelCase : Optional[Any] = 5 lowerCamelCase : Any = False if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]: lowerCamelCase : Optional[Any] = best_score_hparams[model_dir]["length_penalty"] else: lowerCamelCase : Optional[Any] = 1.0 print(f'''Generating {fsmt_model_config_file}''' ) with open(_SCREAMING_SNAKE_CASE ,"w" ,encoding="utf-8" ) as f: f.write(json.dumps(_SCREAMING_SNAKE_CASE ,ensure_ascii=_SCREAMING_SNAKE_CASE ,indent=_SCREAMING_SNAKE_CASE ) ) # tokenizer config lowerCamelCase : Optional[Any] = os.path.join(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) lowerCamelCase : int = { "langs": [src_lang, tgt_lang], "model_max_length": 1024, "do_lower_case": do_lower_case, } print(f'''Generating {fsmt_tokenizer_config_file}''' ) with open(_SCREAMING_SNAKE_CASE ,"w" ,encoding="utf-8" ) as f: f.write(json.dumps(_SCREAMING_SNAKE_CASE ,ensure_ascii=_SCREAMING_SNAKE_CASE ,indent=_SCREAMING_SNAKE_CASE ) ) # model lowerCamelCase : List[str] = chkpt["models"][0] lowerCamelCase : Dict = model.state_dict() # rename keys to start with 'model.' lowerCamelCase : List[str] = OrderedDict(("model." + k, v) for k, v in model_state_dict.items() ) # remove unneeded keys lowerCamelCase : Tuple = [ "model.model", "model.encoder.version", "model.decoder.version", "model.encoder_embed_tokens.weight", "model.decoder_embed_tokens.weight", "model.encoder.embed_positions._float_tensor", "model.decoder.embed_positions._float_tensor", ] for k in ignore_keys: model_state_dict.pop(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) lowerCamelCase : Optional[int] = FSMTConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Optional[int] = FSMTForConditionalGeneration(_SCREAMING_SNAKE_CASE ) # check that it loads ok model_new.load_state_dict(_SCREAMING_SNAKE_CASE ,strict=_SCREAMING_SNAKE_CASE ) # save lowerCamelCase : Any = os.path.join(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) print(f'''Generating {pytorch_weights_dump_path}''' ) torch.save(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) print("Conversion is done!" ) print("\nLast step is to upload the files to s3" ) print(f'''cd {data_root}''' ) print(f'''transformers-cli upload {model_dir}''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--fsmt_checkpoint_path', default=None, type=str, required=True, help=( 'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,' ' bpecodes, etc.' ), ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) SCREAMING_SNAKE_CASE__ : Optional[Any] = parser.parse_args() convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
48
def A ( _SCREAMING_SNAKE_CASE ) -> list: if n_term == "": return [] lowerCamelCase : list = [] for temp in range(int(_SCREAMING_SNAKE_CASE ) ): series.append(f'''1/{temp + 1}''' if series else "1" ) return series if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Dict = input('Enter the last number (nth term) of the Harmonic Series') print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n') print(harmonic_series(nth_term))
48
1
from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__) class UpperCamelCase__ : '''simple docstring''' lowerCamelCase_ : str lowerCamelCase_ : str = None @staticmethod def _lowercase ( ) -> Dict: raise NotImplementedError def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) -> str: raise NotImplementedError def _lowercase ( self , UpperCamelCase__ ) -> List[Any]: raise NotImplementedError def _lowercase ( self ) -> Union[str, Any]: if not self.is_available(): raise RuntimeError( F'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' ) @classmethod def _lowercase ( cls ) -> List[str]: return F'''`pip install {cls.pip_package or cls.name}`''' class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Tuple = """optuna""" @staticmethod def _lowercase ( ) -> int: return is_optuna_available() def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]: return run_hp_search_optuna(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ ) -> Optional[int]: return default_hp_space_optuna(UpperCamelCase__ ) class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : List[Any] = """ray""" lowerCamelCase_ : Optional[Any] = """'ray[tune]'""" @staticmethod def _lowercase ( ) -> Any: return is_ray_available() def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]: return run_hp_search_ray(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ ) -> Dict: return default_hp_space_ray(UpperCamelCase__ ) class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : str = """sigopt""" @staticmethod def _lowercase ( ) -> str: return is_sigopt_available() def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]: return run_hp_search_sigopt(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ ) -> Optional[Any]: return default_hp_space_sigopt(UpperCamelCase__ ) class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : List[str] = """wandb""" @staticmethod def _lowercase ( ) -> List[Any]: return is_wandb_available() def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) -> str: return run_hp_search_wandb(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ ) -> Dict: return default_hp_space_wandb(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : str = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def A ( ) -> str: lowerCamelCase : Tuple = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(_SCREAMING_SNAKE_CASE ) > 0: lowerCamelCase : List[Any] = available_backends[0].name if len(_SCREAMING_SNAKE_CASE ) > 1: logger.info( f'''{len(_SCREAMING_SNAKE_CASE )} hyperparameter search backends available. Using {name} as the default.''' ) return name raise RuntimeError( "No hyperparameter search backend available.\n" + "\n".join( f''' - To install {backend.name} run {backend.pip_install()}''' for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
48
from __future__ import annotations import requests def A ( _SCREAMING_SNAKE_CASE ) -> dict: lowerCamelCase : Tuple = f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty''' return requests.get(_SCREAMING_SNAKE_CASE ).json() def A ( _SCREAMING_SNAKE_CASE = 10 ) -> list[dict]: lowerCamelCase : str = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty" lowerCamelCase : Any = requests.get(_SCREAMING_SNAKE_CASE ).json()[:max_stories] return [get_hackernews_story(_SCREAMING_SNAKE_CASE ) for story_id in story_ids] def A ( _SCREAMING_SNAKE_CASE = 10 ) -> str: lowerCamelCase : str = hackernews_top_stories(_SCREAMING_SNAKE_CASE ) return "\n".join("* [{title}]({url})".format(**_SCREAMING_SNAKE_CASE ) for story in stories ) if __name__ == "__main__": print(hackernews_top_stories_as_markdown())
48
1
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class UpperCamelCase__ : '''simple docstring''' @staticmethod def _lowercase ( *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]: pass @is_pipeline_test @require_vision class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @require_torch def _lowercase ( self ) -> Union[str, Any]: lowerCamelCase : Tuple = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , ) lowerCamelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) lowerCamelCase : List[Any] = image_classifier(UpperCamelCase__ , candidate_labels=["a", "b", "c"] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(UpperCamelCase__ ) , [ [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}], ] , ) lowerCamelCase : List[Any] = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 ) self.assertEqual( nested_simplify(UpperCamelCase__ ) , [ [ {"score": 0.333, "label": ANY(UpperCamelCase__ )}, {"score": 0.333, "label": ANY(UpperCamelCase__ )}, {"score": 0.333, "label": ANY(UpperCamelCase__ )}, ], [ {"score": 0.333, "label": ANY(UpperCamelCase__ )}, {"score": 0.333, "label": ANY(UpperCamelCase__ )}, {"score": 0.333, "label": ANY(UpperCamelCase__ )}, ], [ {"score": 0.333, "label": ANY(UpperCamelCase__ )}, {"score": 0.333, "label": ANY(UpperCamelCase__ )}, {"score": 0.333, "label": ANY(UpperCamelCase__ )}, ], [ {"score": 0.333, "label": ANY(UpperCamelCase__ )}, {"score": 0.333, "label": ANY(UpperCamelCase__ )}, {"score": 0.333, "label": ANY(UpperCamelCase__ )}, ], [ {"score": 0.333, "label": ANY(UpperCamelCase__ )}, {"score": 0.333, "label": ANY(UpperCamelCase__ )}, {"score": 0.333, "label": ANY(UpperCamelCase__ )}, ], ] , ) @require_tf def _lowercase ( self ) -> int: lowerCamelCase : str = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" ) lowerCamelCase : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) lowerCamelCase : Dict = image_classifier(UpperCamelCase__ , candidate_labels=["a", "b", "c"] ) self.assertEqual( nested_simplify(UpperCamelCase__ ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , ) lowerCamelCase : str = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 ) self.assertEqual( nested_simplify(UpperCamelCase__ ) , [ [ {"score": 0.333, "label": ANY(UpperCamelCase__ )}, {"score": 0.333, "label": ANY(UpperCamelCase__ )}, {"score": 0.333, "label": ANY(UpperCamelCase__ )}, ], [ {"score": 0.333, "label": ANY(UpperCamelCase__ )}, {"score": 0.333, "label": ANY(UpperCamelCase__ )}, {"score": 0.333, "label": ANY(UpperCamelCase__ )}, ], [ {"score": 0.333, "label": ANY(UpperCamelCase__ )}, {"score": 0.333, "label": ANY(UpperCamelCase__ )}, {"score": 0.333, "label": ANY(UpperCamelCase__ )}, ], [ {"score": 0.333, "label": ANY(UpperCamelCase__ )}, {"score": 0.333, "label": ANY(UpperCamelCase__ )}, {"score": 0.333, "label": ANY(UpperCamelCase__ )}, ], [ {"score": 0.333, "label": ANY(UpperCamelCase__ )}, {"score": 0.333, "label": ANY(UpperCamelCase__ )}, {"score": 0.333, "label": ANY(UpperCamelCase__ )}, ], ] , ) @slow @require_torch def _lowercase ( self ) -> Optional[Any]: lowerCamelCase : int = pipeline( task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , ) # This is an image of 2 cats with remotes and no planes lowerCamelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) lowerCamelCase : Optional[int] = image_classifier(UpperCamelCase__ , candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(UpperCamelCase__ ) , [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ] , ) lowerCamelCase : Tuple = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 ) self.assertEqual( nested_simplify(UpperCamelCase__ ) , [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5 , ) @slow @require_tf def _lowercase ( self ) -> Tuple: lowerCamelCase : str = pipeline( task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" ) # This is an image of 2 cats with remotes and no planes lowerCamelCase : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) lowerCamelCase : Tuple = image_classifier(UpperCamelCase__ , candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(UpperCamelCase__ ) , [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ] , ) lowerCamelCase : int = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 ) self.assertEqual( nested_simplify(UpperCamelCase__ ) , [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5 , )
48
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Dict = { 'salesforce/blip2-opt-2.7b': 'https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json', } class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Union[str, Any] = """blip_2_vision_model""" def __init__( self , UpperCamelCase__=1408 , UpperCamelCase__=6144 , UpperCamelCase__=39 , UpperCamelCase__=16 , UpperCamelCase__=224 , UpperCamelCase__=14 , UpperCamelCase__="gelu" , UpperCamelCase__=0.00001 , UpperCamelCase__=0.0 , UpperCamelCase__=1e-10 , UpperCamelCase__=True , **UpperCamelCase__ , ) -> Optional[Any]: super().__init__(**UpperCamelCase__ ) lowerCamelCase : Dict = hidden_size lowerCamelCase : Union[str, Any] = intermediate_size lowerCamelCase : List[str] = num_hidden_layers lowerCamelCase : List[str] = num_attention_heads lowerCamelCase : Dict = patch_size lowerCamelCase : Tuple = image_size lowerCamelCase : Dict = initializer_range lowerCamelCase : Union[str, Any] = attention_dropout lowerCamelCase : Dict = layer_norm_eps lowerCamelCase : Optional[Any] = hidden_act lowerCamelCase : str = qkv_bias @classmethod def _lowercase ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> "PretrainedConfig": cls._set_token_in_kwargs(UpperCamelCase__ ) lowerCamelCase , lowerCamelCase : List[str] = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ ) # get the vision config dict if we are loading from Blip2Config if config_dict.get("model_type" ) == "blip-2": lowerCamelCase : Optional[int] = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ ) class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Dict = """blip_2_qformer""" def __init__( self , UpperCamelCase__=3_0522 , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-12 , UpperCamelCase__=0 , UpperCamelCase__="absolute" , UpperCamelCase__=2 , UpperCamelCase__=1408 , **UpperCamelCase__ , ) -> int: super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ ) lowerCamelCase : Optional[int] = vocab_size lowerCamelCase : int = hidden_size lowerCamelCase : Dict = num_hidden_layers lowerCamelCase : Union[str, Any] = num_attention_heads lowerCamelCase : int = hidden_act lowerCamelCase : Optional[Any] = intermediate_size lowerCamelCase : Dict = hidden_dropout_prob lowerCamelCase : Dict = attention_probs_dropout_prob lowerCamelCase : Dict = max_position_embeddings lowerCamelCase : List[str] = initializer_range lowerCamelCase : List[str] = layer_norm_eps lowerCamelCase : int = position_embedding_type lowerCamelCase : Tuple = cross_attention_frequency lowerCamelCase : Optional[int] = encoder_hidden_size @classmethod def _lowercase ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> "PretrainedConfig": cls._set_token_in_kwargs(UpperCamelCase__ ) lowerCamelCase , lowerCamelCase : str = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ ) # get the qformer config dict if we are loading from Blip2Config if config_dict.get("model_type" ) == "blip-2": lowerCamelCase : int = config_dict["qformer_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ ) class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : List[str] = """blip-2""" lowerCamelCase_ : int = True def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=32 , **UpperCamelCase__ ) -> str: super().__init__(**UpperCamelCase__ ) if vision_config is None: lowerCamelCase : List[Any] = {} logger.info("vision_config is None. initializing the Blip2VisionConfig with default values." ) if qformer_config is None: lowerCamelCase : List[Any] = {} logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values." ) if text_config is None: lowerCamelCase : Any = {} logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." ) lowerCamelCase : Optional[int] = BlipaVisionConfig(**UpperCamelCase__ ) lowerCamelCase : str = BlipaQFormerConfig(**UpperCamelCase__ ) lowerCamelCase : List[str] = text_config["model_type"] if "model_type" in text_config else "opt" lowerCamelCase : str = CONFIG_MAPPING[text_model_type](**UpperCamelCase__ ) lowerCamelCase : Optional[Any] = self.text_config.tie_word_embeddings lowerCamelCase : int = self.text_config.is_encoder_decoder lowerCamelCase : Optional[Any] = num_query_tokens lowerCamelCase : int = self.vision_config.hidden_size lowerCamelCase : Tuple = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES lowerCamelCase : Dict = 1.0 lowerCamelCase : List[Any] = 0.02 @classmethod def _lowercase ( cls , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ , ) -> str: return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **UpperCamelCase__ , ) def _lowercase ( self ) -> Optional[Any]: lowerCamelCase : Tuple = copy.deepcopy(self.__dict__ ) lowerCamelCase : Tuple = self.vision_config.to_dict() lowerCamelCase : int = self.qformer_config.to_dict() lowerCamelCase : Optional[Any] = self.text_config.to_dict() lowerCamelCase : int = self.__class__.model_type return output
48
1
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : List[str] = '▁' SCREAMING_SNAKE_CASE__ : Any = {'vocab_file': 'spiece.model'} SCREAMING_SNAKE_CASE__ : Dict = { 'vocab_file': { 'google/reformer-crime-and-punishment': ( 'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model' ) } } SCREAMING_SNAKE_CASE__ : List[Any] = { 'google/reformer-crime-and-punishment': 524288, } class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Any = VOCAB_FILES_NAMES lowerCamelCase_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase_ : Optional[Any] = ["""input_ids""", """attention_mask"""] def __init__( self , UpperCamelCase__ , UpperCamelCase__="</s>" , UpperCamelCase__="<unk>" , UpperCamelCase__=[] , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> None: lowerCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , ) lowerCamelCase : str = vocab_file lowerCamelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCamelCase__ ) @property def _lowercase ( self ) -> Dict: return self.sp_model.get_piece_size() def _lowercase ( self ) -> Dict[str, int]: lowerCamelCase : Optional[Any] = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> str: lowerCamelCase : Optional[int] = self.__dict__.copy() lowerCamelCase : Dict = None return state def __setstate__( self , UpperCamelCase__ ) -> Union[str, Any]: lowerCamelCase : Any = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): lowerCamelCase : Any = {} lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _lowercase ( self , UpperCamelCase__ ) -> List[str]: return self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ ) -> Union[str, Any]: return self.sp_model.piece_to_id(UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ ) -> int: if index < self.sp_model.get_piece_size(): lowerCamelCase : Dict = self.sp_model.IdToPiece(UpperCamelCase__ ) return token def _lowercase ( self , UpperCamelCase__ ) -> Dict: lowerCamelCase : Any = [] lowerCamelCase : str = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(UpperCamelCase__ ) + token lowerCamelCase : Union[str, Any] = [] else: current_sub_tokens.append(UpperCamelCase__ ) out_string += self.sp_model.decode(UpperCamelCase__ ) return out_string.strip() def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]: if not os.path.isdir(UpperCamelCase__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCamelCase : Optional[int] = os.path.join( UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase__ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase__ , "wb" ) as fi: lowerCamelCase : Tuple = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase__ ) return (out_vocab_file,)
48
import random from .binary_exp_mod import bin_exp_mod def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=1000 ) -> List[str]: if n < 2: return False if n % 2 == 0: return n == 2 # this means n is odd lowerCamelCase : List[Any] = n - 1 lowerCamelCase : Dict = 0 while d % 2 == 0: d /= 2 exp += 1 # n - 1=d*(2**exp) lowerCamelCase : Optional[Any] = 0 while count < prec: lowerCamelCase : str = random.randint(2 ,n - 1 ) lowerCamelCase : Dict = bin_exp_mod(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) if b != 1: lowerCamelCase : str = True for _ in range(_SCREAMING_SNAKE_CASE ): if b == n - 1: lowerCamelCase : Tuple = False break lowerCamelCase : int = b * b b %= n if flag: return False count += 1 return True if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Optional[int] = abs(int(input('Enter bound : ').strip())) print('Here\'s the list of primes:') print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
48
1
SCREAMING_SNAKE_CASE__ : str = 8.31_4462 # Unit - J mol-1 K-1 def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> float: if moles < 0 or kelvin < 0 or volume < 0: raise ValueError("Invalid inputs. Enter positive value." ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> float: if moles < 0 or kelvin < 0 or pressure < 0: raise ValueError("Invalid inputs. Enter positive value." ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure if __name__ == "__main__": from doctest import testmod testmod()
48
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Tuple = {'vocab_file': 'spiece.model'} SCREAMING_SNAKE_CASE__ : int = { 'vocab_file': { 'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model', 'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model', } } SCREAMING_SNAKE_CASE__ : str = { 'xlnet-base-cased': None, 'xlnet-large-cased': None, } # Segments (not really needed) SCREAMING_SNAKE_CASE__ : Dict = 0 SCREAMING_SNAKE_CASE__ : Tuple = 1 SCREAMING_SNAKE_CASE__ : Optional[int] = 2 SCREAMING_SNAKE_CASE__ : List[str] = 3 SCREAMING_SNAKE_CASE__ : Optional[int] = 4 class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Dict = VOCAB_FILES_NAMES lowerCamelCase_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase_ : List[str] = """left""" def __init__( self , UpperCamelCase__ , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__="<s>" , UpperCamelCase__="</s>" , UpperCamelCase__="<unk>" , UpperCamelCase__="<sep>" , UpperCamelCase__="<pad>" , UpperCamelCase__="<cls>" , UpperCamelCase__="<mask>" , UpperCamelCase__=["<eop>", "<eod>"] , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> None: # Mask token behave like a normal word, i.e. include the space before it lowerCamelCase : str = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token lowerCamelCase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , ) lowerCamelCase : Any = 3 lowerCamelCase : Optional[Any] = do_lower_case lowerCamelCase : List[Any] = remove_space lowerCamelCase : str = keep_accents lowerCamelCase : List[Any] = vocab_file lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCamelCase__ ) @property def _lowercase ( self ) -> Optional[Any]: return len(self.sp_model ) def _lowercase ( self ) -> Optional[int]: lowerCamelCase : int = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Optional[Any]: lowerCamelCase : Optional[int] = self.__dict__.copy() lowerCamelCase : Union[str, Any] = None return state def __setstate__( self , UpperCamelCase__ ) -> int: lowerCamelCase : int = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): lowerCamelCase : Any = {} lowerCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _lowercase ( self , UpperCamelCase__ ) -> Any: if self.remove_space: lowerCamelCase : Dict = " ".join(inputs.strip().split() ) else: lowerCamelCase : Union[str, Any] = inputs lowerCamelCase : Optional[Any] = outputs.replace("``" , "\"" ).replace("''" , "\"" ) if not self.keep_accents: lowerCamelCase : Optional[int] = unicodedata.normalize("NFKD" , UpperCamelCase__ ) lowerCamelCase : List[Any] = "".join([c for c in outputs if not unicodedata.combining(UpperCamelCase__ )] ) if self.do_lower_case: lowerCamelCase : List[str] = outputs.lower() return outputs def _lowercase ( self , UpperCamelCase__ ) -> List[str]: lowerCamelCase : Optional[Any] = self.preprocess_text(UpperCamelCase__ ) lowerCamelCase : Dict = self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ ) lowerCamelCase : Dict = [] for piece in pieces: if len(UpperCamelCase__ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit(): lowerCamelCase : List[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase__ , "" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: lowerCamelCase : Union[str, Any] = cur_pieces[1:] else: lowerCamelCase : Optional[int] = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(UpperCamelCase__ ) else: new_pieces.append(UpperCamelCase__ ) return new_pieces def _lowercase ( self , UpperCamelCase__ ) -> int: return self.sp_model.PieceToId(UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ ) -> Tuple: return self.sp_model.IdToPiece(UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ ) -> List[str]: lowerCamelCase : Union[str, Any] = "".join(UpperCamelCase__ ).replace(UpperCamelCase__ , " " ).strip() return out_string def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = True , **UpperCamelCase__ , ) -> str: lowerCamelCase : Optional[int] = kwargs.pop("use_source_tokenizer" , UpperCamelCase__ ) lowerCamelCase : Optional[int] = self.convert_ids_to_tokens(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. https://github.com/huggingface/transformers/issues/1133 lowerCamelCase : Any = [] lowerCamelCase : Any = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(UpperCamelCase__ ) ) lowerCamelCase : int = [] sub_texts.append(UpperCamelCase__ ) else: current_sub_text.append(UpperCamelCase__ ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(UpperCamelCase__ ) ) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens lowerCamelCase : Union[str, Any] = "".join(UpperCamelCase__ ) lowerCamelCase : Tuple = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: lowerCamelCase : int = self.clean_up_tokenization(UpperCamelCase__ ) return clean_text else: return text def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]: lowerCamelCase : str = [self.sep_token_id] lowerCamelCase : Optional[int] = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ ) if token_ids_a is not None: return ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1, 1] return ([0] * len(UpperCamelCase__ )) + [1, 1] def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]: lowerCamelCase : Any = [self.sep_token_id] lowerCamelCase : List[str] = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]: if not os.path.isdir(UpperCamelCase__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCamelCase : Union[str, Any] = os.path.join( UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase__ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase__ , "wb" ) as fi: lowerCamelCase : str = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase__ ) return (out_vocab_file,)
48
1
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = False , **UpperCamelCase__ , ) -> List[Any]: super().__init__(features=UpperCamelCase__ , cache_dir=UpperCamelCase__ , keep_in_memory=UpperCamelCase__ , **UpperCamelCase__ ) lowerCamelCase : str = Sql( cache_dir=UpperCamelCase__ , features=UpperCamelCase__ , sql=UpperCamelCase__ , con=UpperCamelCase__ , **UpperCamelCase__ , ) def _lowercase ( self ) -> List[Any]: lowerCamelCase : List[Any] = None lowerCamelCase : List[str] = None lowerCamelCase : int = None lowerCamelCase : Optional[Any] = None self.builder.download_and_prepare( download_config=UpperCamelCase__ , download_mode=UpperCamelCase__ , verification_mode=UpperCamelCase__ , base_path=UpperCamelCase__ , ) # Build dataset for splits lowerCamelCase : Any = self.builder.as_dataset( split="train" , verification_mode=UpperCamelCase__ , in_memory=self.keep_in_memory ) return dataset class UpperCamelCase__ : '''simple docstring''' def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> Any: if num_proc is not None and num_proc <= 0: raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''' ) lowerCamelCase : int = dataset lowerCamelCase : int = name lowerCamelCase : Optional[int] = con lowerCamelCase : List[Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE lowerCamelCase : int = num_proc lowerCamelCase : int = to_sql_kwargs def _lowercase ( self ) -> int: lowerCamelCase : Optional[Any] = self.to_sql_kwargs.pop("sql" , UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = self.to_sql_kwargs.pop("con" , UpperCamelCase__ ) lowerCamelCase : str = self.to_sql_kwargs.pop("index" , UpperCamelCase__ ) lowerCamelCase : str = self._write(index=UpperCamelCase__ , **self.to_sql_kwargs ) return written def _lowercase ( self , UpperCamelCase__ ) -> str: lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = args lowerCamelCase : Tuple = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs lowerCamelCase : List[str] = query_table( table=self.dataset.data , key=slice(UpperCamelCase__ , offset + self.batch_size ) , indices=self.dataset._indices , ) lowerCamelCase : List[Any] = batch.to_pandas() lowerCamelCase : Any = df.to_sql(self.name , self.con , index=UpperCamelCase__ , **UpperCamelCase__ ) return num_rows or len(UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ , **UpperCamelCase__ ) -> int: lowerCamelCase : Tuple = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: lowerCamelCase , lowerCamelCase : Tuple = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , UpperCamelCase__ , UpperCamelCase__ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ): written += num_rows return written
48
import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Tuple = { 'b0': efficientnet.EfficientNetBa, 'b1': efficientnet.EfficientNetBa, 'b2': efficientnet.EfficientNetBa, 'b3': efficientnet.EfficientNetBa, 'b4': efficientnet.EfficientNetBa, 'b5': efficientnet.EfficientNetBa, 'b6': efficientnet.EfficientNetBa, 'b7': efficientnet.EfficientNetBa, } SCREAMING_SNAKE_CASE__ : Any = { 'b0': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.0, 'image_size': 224, 'dropout_rate': 0.2, 'dw_padding': [], }, 'b1': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.1, 'image_size': 240, 'dropout_rate': 0.2, 'dw_padding': [16], }, 'b2': { 'hidden_dim': 1408, 'width_coef': 1.1, 'depth_coef': 1.2, 'image_size': 260, 'dropout_rate': 0.3, 'dw_padding': [5, 8, 16], }, 'b3': { 'hidden_dim': 1536, 'width_coef': 1.2, 'depth_coef': 1.4, 'image_size': 300, 'dropout_rate': 0.3, 'dw_padding': [5, 18], }, 'b4': { 'hidden_dim': 1792, 'width_coef': 1.4, 'depth_coef': 1.8, 'image_size': 380, 'dropout_rate': 0.4, 'dw_padding': [6], }, 'b5': { 'hidden_dim': 2048, 'width_coef': 1.6, 'depth_coef': 2.2, 'image_size': 456, 'dropout_rate': 0.4, 'dw_padding': [13, 27], }, 'b6': { 'hidden_dim': 2304, 'width_coef': 1.8, 'depth_coef': 2.6, 'image_size': 528, 'dropout_rate': 0.5, 'dw_padding': [31], }, 'b7': { 'hidden_dim': 2560, 'width_coef': 2.0, 'depth_coef': 3.1, 'image_size': 600, 'dropout_rate': 0.5, 'dw_padding': [18], }, } def A ( _SCREAMING_SNAKE_CASE ) -> str: lowerCamelCase : int = EfficientNetConfig() lowerCamelCase : List[str] = CONFIG_MAP[model_name]["hidden_dim"] lowerCamelCase : List[str] = CONFIG_MAP[model_name]["width_coef"] lowerCamelCase : Any = CONFIG_MAP[model_name]["depth_coef"] lowerCamelCase : Union[str, Any] = CONFIG_MAP[model_name]["image_size"] lowerCamelCase : Optional[int] = CONFIG_MAP[model_name]["dropout_rate"] lowerCamelCase : str = CONFIG_MAP[model_name]["dw_padding"] lowerCamelCase : Tuple = "huggingface/label-files" lowerCamelCase : List[str] = "imagenet-1k-id2label.json" lowerCamelCase : Any = 1000 lowerCamelCase : Any = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,repo_type="dataset" ) ,"r" ) ) lowerCamelCase : List[str] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowerCamelCase : Tuple = idalabel lowerCamelCase : Any = {v: k for k, v in idalabel.items()} return config def A ( ) -> int: lowerCamelCase : str = "http://images.cocodataset.org/val2017/000000039769.jpg" lowerCamelCase : Tuple = Image.open(requests.get(_SCREAMING_SNAKE_CASE ,stream=_SCREAMING_SNAKE_CASE ).raw ) return im def A ( _SCREAMING_SNAKE_CASE ) -> str: lowerCamelCase : List[Any] = CONFIG_MAP[model_name]["image_size"] lowerCamelCase : str = EfficientNetImageProcessor( size={"height": size, "width": size} ,image_mean=[0.485, 0.456, 0.406] ,image_std=[0.47853944, 0.4732864, 0.47434163] ,do_center_crop=_SCREAMING_SNAKE_CASE ,) return preprocessor def A ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: lowerCamelCase : Any = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )] lowerCamelCase : Any = sorted(set(_SCREAMING_SNAKE_CASE ) ) lowerCamelCase : Dict = len(_SCREAMING_SNAKE_CASE ) lowerCamelCase : List[Any] = {b: str(_SCREAMING_SNAKE_CASE ) for b, i in zip(_SCREAMING_SNAKE_CASE ,range(_SCREAMING_SNAKE_CASE ) )} lowerCamelCase : List[Any] = [] rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") ) rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") ) rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") ) rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") ) rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") ) for b in block_names: lowerCamelCase : Dict = block_name_mapping[b] rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") ) rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") ) rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") ) rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") ) rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") ) lowerCamelCase : Optional[int] = {} for item in rename_keys: if item[0] in original_param_names: lowerCamelCase : List[str] = "efficientnet." + item[1] lowerCamelCase : int = "classifier.weight" lowerCamelCase : Union[str, Any] = "classifier.bias" return key_mapping def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Dict: for key, value in tf_params.items(): if "normalization" in key: continue lowerCamelCase : Tuple = key_mapping[key] if "_conv" in key and "kernel" in key: lowerCamelCase : List[Any] = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(3 ,2 ,0 ,1 ) elif "depthwise_kernel" in key: lowerCamelCase : int = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(2 ,3 ,0 ,1 ) elif "kernel" in key: lowerCamelCase : List[str] = torch.from_numpy(np.transpose(_SCREAMING_SNAKE_CASE ) ) else: lowerCamelCase : Optional[Any] = torch.from_numpy(_SCREAMING_SNAKE_CASE ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_SCREAMING_SNAKE_CASE ) @torch.no_grad() def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[int]: lowerCamelCase : Optional[int] = model_classes[model_name]( include_top=_SCREAMING_SNAKE_CASE ,weights="imagenet" ,input_tensor=_SCREAMING_SNAKE_CASE ,input_shape=_SCREAMING_SNAKE_CASE ,pooling=_SCREAMING_SNAKE_CASE ,classes=1000 ,classifier_activation="softmax" ,) lowerCamelCase : List[Any] = original_model.trainable_variables lowerCamelCase : Tuple = original_model.non_trainable_variables lowerCamelCase : Union[str, Any] = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: lowerCamelCase : List[str] = param.numpy() lowerCamelCase : int = list(tf_params.keys() ) # Load HuggingFace model lowerCamelCase : Union[str, Any] = get_efficientnet_config(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Optional[int] = EfficientNetForImageClassification(_SCREAMING_SNAKE_CASE ).eval() lowerCamelCase : Tuple = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("Converting parameters..." ) lowerCamelCase : Union[str, Any] = rename_keys(_SCREAMING_SNAKE_CASE ) replace_params(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # Initialize preprocessor and preprocess input image lowerCamelCase : int = convert_image_processor(_SCREAMING_SNAKE_CASE ) lowerCamelCase : int = preprocessor(images=prepare_img() ,return_tensors="pt" ) # HF model inference hf_model.eval() with torch.no_grad(): lowerCamelCase : Optional[Any] = hf_model(**_SCREAMING_SNAKE_CASE ) lowerCamelCase : str = outputs.logits.detach().numpy() # Original model inference lowerCamelCase : Optional[Any] = False lowerCamelCase : Any = CONFIG_MAP[model_name]["image_size"] lowerCamelCase : Optional[int] = prepare_img().resize((image_size, image_size) ,resample=PIL.Image.NEAREST ) lowerCamelCase : Union[str, Any] = image.img_to_array(_SCREAMING_SNAKE_CASE ) lowerCamelCase : str = np.expand_dims(_SCREAMING_SNAKE_CASE ,axis=0 ) lowerCamelCase : Dict = original_model.predict(_SCREAMING_SNAKE_CASE ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,atol=1e-3 ), "The predicted logits are not the same." print("Model outputs match!" ) if save_model: # Create folder to save model if not os.path.isdir(_SCREAMING_SNAKE_CASE ): os.mkdir(_SCREAMING_SNAKE_CASE ) # Save converted model and image processor hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) preprocessor.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: # Push model and image processor to hub print(f'''Pushing converted {model_name} to the hub...''' ) lowerCamelCase : int = f'''efficientnet-{model_name}''' preprocessor.push_to_hub(_SCREAMING_SNAKE_CASE ) hf_model.push_to_hub(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='b0', type=str, help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].', ) parser.add_argument( '--pytorch_dump_folder_path', default='hf_model', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--save_model', action='store_true', help='Save model to local') parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
48
1
import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO ) SCREAMING_SNAKE_CASE__ : str = logging.getLogger(__name__) def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> List[str]: lowerCamelCase : List[Any] = np.argmax(_SCREAMING_SNAKE_CASE ,axis=1 ) return np.sum(outputs == labels ) def A ( _SCREAMING_SNAKE_CASE ) -> Dict: with open(_SCREAMING_SNAKE_CASE ,encoding="utf_8" ) as f: lowerCamelCase : str = csv.reader(_SCREAMING_SNAKE_CASE ) lowerCamelCase : int = [] next(_SCREAMING_SNAKE_CASE ) # skip the first line for line in tqdm(_SCREAMING_SNAKE_CASE ): output.append((" ".join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) ) return output def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]: lowerCamelCase : Dict = [] for dataset in encoded_datasets: lowerCamelCase : int = len(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Optional[Any] = np.zeros((n_batch, 2, input_len) ,dtype=np.intaa ) lowerCamelCase : str = np.zeros((n_batch, 2) ,dtype=np.intaa ) lowerCamelCase : Optional[Any] = np.full((n_batch, 2, input_len) ,fill_value=-100 ,dtype=np.intaa ) lowerCamelCase : Dict = np.zeros((n_batch,) ,dtype=np.intaa ) for ( i, (story, conta, conta, mc_label), ) in enumerate(_SCREAMING_SNAKE_CASE ): lowerCamelCase : str = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] lowerCamelCase : List[str] = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] lowerCamelCase : Any = with_conta lowerCamelCase : Tuple = with_conta lowerCamelCase : Tuple = len(_SCREAMING_SNAKE_CASE ) - 1 lowerCamelCase : int = len(_SCREAMING_SNAKE_CASE ) - 1 lowerCamelCase : Tuple = with_conta lowerCamelCase : Dict = with_conta lowerCamelCase : Union[str, Any] = mc_label lowerCamelCase : List[Any] = (input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(_SCREAMING_SNAKE_CASE ) for t in all_inputs ) ) return tensor_datasets def A ( ) -> int: lowerCamelCase : Dict = argparse.ArgumentParser() parser.add_argument("--model_name" ,type=_SCREAMING_SNAKE_CASE ,default="openai-gpt" ,help="pretrained model name" ) parser.add_argument("--do_train" ,action="store_true" ,help="Whether to run training." ) parser.add_argument("--do_eval" ,action="store_true" ,help="Whether to run eval on the dev set." ) parser.add_argument( "--output_dir" ,default=_SCREAMING_SNAKE_CASE ,type=_SCREAMING_SNAKE_CASE ,required=_SCREAMING_SNAKE_CASE ,help="The output directory where the model predictions and checkpoints will be written." ,) parser.add_argument("--train_dataset" ,type=_SCREAMING_SNAKE_CASE ,default="" ) parser.add_argument("--eval_dataset" ,type=_SCREAMING_SNAKE_CASE ,default="" ) parser.add_argument("--seed" ,type=_SCREAMING_SNAKE_CASE ,default=42 ) parser.add_argument("--num_train_epochs" ,type=_SCREAMING_SNAKE_CASE ,default=3 ) parser.add_argument("--train_batch_size" ,type=_SCREAMING_SNAKE_CASE ,default=8 ) parser.add_argument("--eval_batch_size" ,type=_SCREAMING_SNAKE_CASE ,default=16 ) parser.add_argument("--adam_epsilon" ,default=1e-8 ,type=_SCREAMING_SNAKE_CASE ,help="Epsilon for Adam optimizer." ) parser.add_argument("--max_grad_norm" ,type=_SCREAMING_SNAKE_CASE ,default=1 ) parser.add_argument( "--max_steps" ,default=-1 ,type=_SCREAMING_SNAKE_CASE ,help=( "If > 0: set total number of training steps to perform. Override num_train_epochs." ) ,) parser.add_argument( "--gradient_accumulation_steps" ,type=_SCREAMING_SNAKE_CASE ,default=1 ,help="Number of updates steps to accumulate before performing a backward/update pass." ,) parser.add_argument("--learning_rate" ,type=_SCREAMING_SNAKE_CASE ,default=6.25e-5 ) parser.add_argument("--warmup_steps" ,default=0 ,type=_SCREAMING_SNAKE_CASE ,help="Linear warmup over warmup_steps." ) parser.add_argument("--lr_schedule" ,type=_SCREAMING_SNAKE_CASE ,default="warmup_linear" ) parser.add_argument("--weight_decay" ,type=_SCREAMING_SNAKE_CASE ,default=0.01 ) parser.add_argument("--lm_coef" ,type=_SCREAMING_SNAKE_CASE ,default=0.9 ) parser.add_argument("--n_valid" ,type=_SCREAMING_SNAKE_CASE ,default=374 ) parser.add_argument("--server_ip" ,type=_SCREAMING_SNAKE_CASE ,default="" ,help="Can be used for distant debugging." ) parser.add_argument("--server_port" ,type=_SCREAMING_SNAKE_CASE ,default="" ,help="Can be used for distant debugging." ) lowerCamelCase : Union[str, Any] = parser.parse_args() print(_SCREAMING_SNAKE_CASE ) if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach" ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) ,redirect_output=_SCREAMING_SNAKE_CASE ) ptvsd.wait_for_attach() random.seed(args.seed ) np.random.seed(args.seed ) torch.manual_seed(args.seed ) torch.cuda.manual_seed_all(args.seed ) lowerCamelCase : List[Any] = torch.device("cuda" if torch.cuda.is_available() else "cpu" ) lowerCamelCase : Tuple = torch.cuda.device_count() logger.info("device: {}, n_gpu {}".format(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) ) if not args.do_train and not args.do_eval: raise ValueError("At least one of `do_train` or `do_eval` must be True." ) if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) # Load tokenizer and model # This loading functions also add new tokens and embeddings called `special tokens` # These new embeddings will be fine-tuned on the RocStories dataset lowerCamelCase : Union[str, Any] = ["_start_", "_delimiter_", "_classify_"] lowerCamelCase : Dict = OpenAIGPTTokenizer.from_pretrained(args.model_name ) tokenizer.add_tokens(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Dict = tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Optional[int] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name ) model.resize_token_embeddings(len(_SCREAMING_SNAKE_CASE ) ) model.to(_SCREAMING_SNAKE_CASE ) # Load and encode the datasets def tokenize_and_encode(_SCREAMING_SNAKE_CASE ): if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_SCREAMING_SNAKE_CASE ) ) elif isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): return obj return [tokenize_and_encode(_SCREAMING_SNAKE_CASE ) for o in obj] logger.info("Encoding dataset..." ) lowerCamelCase : Optional[int] = load_rocstories_dataset(args.train_dataset ) lowerCamelCase : Optional[Any] = load_rocstories_dataset(args.eval_dataset ) lowerCamelCase : List[str] = (train_dataset, eval_dataset) lowerCamelCase : Dict = tokenize_and_encode(_SCREAMING_SNAKE_CASE ) # Compute the max input length for the Transformer lowerCamelCase : List[str] = model.config.n_positions // 2 - 2 lowerCamelCase : List[Any] = max( len(story[:max_length] ) + max(len(conta[:max_length] ) ,len(conta[:max_length] ) ) + 3 for dataset in encoded_datasets for story, conta, conta, _ in dataset ) lowerCamelCase : List[str] = min(_SCREAMING_SNAKE_CASE ,model.config.n_positions ) # Max size of input for the pre-trained model # Prepare inputs tensors and dataloaders lowerCamelCase : Any = pre_process_datasets(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,*_SCREAMING_SNAKE_CASE ) lowerCamelCase , lowerCamelCase : Optional[Any] = tensor_datasets[0], tensor_datasets[1] lowerCamelCase : Dict = TensorDataset(*_SCREAMING_SNAKE_CASE ) lowerCamelCase : List[str] = RandomSampler(_SCREAMING_SNAKE_CASE ) lowerCamelCase : List[str] = DataLoader(_SCREAMING_SNAKE_CASE ,sampler=_SCREAMING_SNAKE_CASE ,batch_size=args.train_batch_size ) lowerCamelCase : Any = TensorDataset(*_SCREAMING_SNAKE_CASE ) lowerCamelCase : Optional[Any] = SequentialSampler(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Dict = DataLoader(_SCREAMING_SNAKE_CASE ,sampler=_SCREAMING_SNAKE_CASE ,batch_size=args.eval_batch_size ) # Prepare optimizer if args.do_train: if args.max_steps > 0: lowerCamelCase : List[str] = args.max_steps lowerCamelCase : Optional[int] = args.max_steps // (len(_SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps) + 1 else: lowerCamelCase : List[Any] = len(_SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps * args.num_train_epochs lowerCamelCase : Optional[Any] = list(model.named_parameters() ) lowerCamelCase : Union[str, Any] = ["bias", "LayerNorm.bias", "LayerNorm.weight"] lowerCamelCase : List[Any] = [ { "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )], "weight_decay": args.weight_decay, }, {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], "weight_decay": 0.0}, ] lowerCamelCase : Optional[Any] = AdamW(_SCREAMING_SNAKE_CASE ,lr=args.learning_rate ,eps=args.adam_epsilon ) lowerCamelCase : List[Any] = get_linear_schedule_with_warmup( _SCREAMING_SNAKE_CASE ,num_warmup_steps=args.warmup_steps ,num_training_steps=_SCREAMING_SNAKE_CASE ) if args.do_train: lowerCamelCase , lowerCamelCase , lowerCamelCase : List[str] = 0, 0, None model.train() for _ in trange(int(args.num_train_epochs ) ,desc="Epoch" ): lowerCamelCase : Any = 0 lowerCamelCase : str = 0 lowerCamelCase : str = tqdm(_SCREAMING_SNAKE_CASE ,desc="Training" ) for step, batch in enumerate(_SCREAMING_SNAKE_CASE ): lowerCamelCase : Optional[Any] = tuple(t.to(_SCREAMING_SNAKE_CASE ) for t in batch ) lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Dict = batch lowerCamelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE ,mc_token_ids=_SCREAMING_SNAKE_CASE ,lm_labels=_SCREAMING_SNAKE_CASE ,mc_labels=_SCREAMING_SNAKE_CASE ) lowerCamelCase : str = args.lm_coef * losses[0] + losses[1] loss.backward() optimizer.step() scheduler.step() optimizer.zero_grad() tr_loss += loss.item() lowerCamelCase : Tuple = ( loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item() ) nb_tr_steps += 1 lowerCamelCase : int = "Training loss: {:.2e} lr: {:.2e}".format(_SCREAMING_SNAKE_CASE ,scheduler.get_lr()[0] ) # Save a trained model if args.do_train: # Save a trained model, configuration and tokenizer lowerCamelCase : Any = model.module if hasattr(_SCREAMING_SNAKE_CASE ,"module" ) else model # Only save the model itself # If we save using the predefined names, we can load using `from_pretrained` lowerCamelCase : Dict = os.path.join(args.output_dir ,_SCREAMING_SNAKE_CASE ) lowerCamelCase : List[str] = os.path.join(args.output_dir ,_SCREAMING_SNAKE_CASE ) torch.save(model_to_save.state_dict() ,_SCREAMING_SNAKE_CASE ) model_to_save.config.to_json_file(_SCREAMING_SNAKE_CASE ) tokenizer.save_vocabulary(args.output_dir ) # Load a trained model and vocabulary that you have fine-tuned lowerCamelCase : Optional[Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir ) lowerCamelCase : Tuple = OpenAIGPTTokenizer.from_pretrained(args.output_dir ) model.to(_SCREAMING_SNAKE_CASE ) if args.do_eval: model.eval() lowerCamelCase , lowerCamelCase : Optional[Any] = 0, 0 lowerCamelCase , lowerCamelCase : List[Any] = 0, 0 for batch in tqdm(_SCREAMING_SNAKE_CASE ,desc="Evaluating" ): lowerCamelCase : Union[str, Any] = tuple(t.to(_SCREAMING_SNAKE_CASE ) for t in batch ) lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : str = batch with torch.no_grad(): lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Any = model( _SCREAMING_SNAKE_CASE ,mc_token_ids=_SCREAMING_SNAKE_CASE ,lm_labels=_SCREAMING_SNAKE_CASE ,mc_labels=_SCREAMING_SNAKE_CASE ) lowerCamelCase : List[str] = mc_logits.detach().cpu().numpy() lowerCamelCase : Any = mc_labels.to("cpu" ).numpy() lowerCamelCase : str = accuracy(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) eval_loss += mc_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0 ) nb_eval_steps += 1 lowerCamelCase : Any = eval_loss / nb_eval_steps lowerCamelCase : Optional[Any] = eval_accuracy / nb_eval_examples lowerCamelCase : Any = tr_loss / nb_tr_steps if args.do_train else None lowerCamelCase : List[Any] = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss} lowerCamelCase : Optional[Any] = os.path.join(args.output_dir ,"eval_results.txt" ) with open(_SCREAMING_SNAKE_CASE ,"w" ) as writer: logger.info("***** Eval results *****" ) for key in sorted(result.keys() ): logger.info(" %s = %s" ,_SCREAMING_SNAKE_CASE ,str(result[key] ) ) writer.write("%s = %s\n" % (key, str(result[key] )) ) if __name__ == "__main__": main()
48
import argparse from pathlib import Path from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,) -> List[str]: if config_name_or_path is None: lowerCamelCase : Any = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base" if generator_tokenizer_name_or_path is None: lowerCamelCase : Dict = generator_name_or_path if question_encoder_tokenizer_name_or_path is None: lowerCamelCase : Any = question_encoder_name_or_path lowerCamelCase : str = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration # Save model. lowerCamelCase : List[Any] = RagConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Optional[Any] = gen_config lowerCamelCase : Optional[Any] = question_encoder_config lowerCamelCase : List[Any] = model_class.from_pretrained_question_encoder_generator( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,config=_SCREAMING_SNAKE_CASE ) rag_model.save_pretrained(_SCREAMING_SNAKE_CASE ) # Sanity check. model_class.from_pretrained(_SCREAMING_SNAKE_CASE ) # Save tokenizers. lowerCamelCase : List[str] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" ) lowerCamelCase : int = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Any = argparse.ArgumentParser() parser.add_argument( '--model_type', choices=['rag_sequence', 'rag_token'], required=True, type=str, help='RAG model type: rag_sequence, rag_token', ) parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.') parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier') parser.add_argument( '--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier' ) parser.add_argument( '--generator_tokenizer_name_or_path', type=str, help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``', ) parser.add_argument( '--question_encoder_tokenizer_name_or_path', type=str, help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``', ) parser.add_argument( '--config_name_or_path', type=str, help=( 'Identifier of the model config to use, if not provided, resolves to a base config for a given' ' ``model_type``' ), ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args() SCREAMING_SNAKE_CASE__ : Optional[Any] = Path(args.dest) dest_dir.mkdir(exist_ok=True) consolidate( args.model_type, args.generator_name_or_path, args.question_encoder_name_or_path, dest_dir, args.config_name_or_path, args.generator_tokenizer_name_or_path, args.question_encoder_tokenizer_name_or_path, )
48
1
import numpy as np # Importing the Keras libraries and packages import tensorflow as tf from tensorflow.keras import layers, models if __name__ == "__main__": # Initialising the CNN # (Sequential- Building the model layer by layer) SCREAMING_SNAKE_CASE__ : Any = models.Sequential() # Step 1 - Convolution # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel # (3,3) is the kernel size (filter matrix) classifier.add( layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='relu') ) # Step 2 - Pooling classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Adding a second convolutional layer classifier.add(layers.ConvaD(32, (3, 3), activation='relu')) classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Step 3 - Flattening classifier.add(layers.Flatten()) # Step 4 - Full connection classifier.add(layers.Dense(units=128, activation='relu')) classifier.add(layers.Dense(units=1, activation='sigmoid')) # Compiling the CNN classifier.compile( optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'] ) # Part 2 - Fitting the CNN to the images # Load Trained model weights # from keras.models import load_model # regressor=load_model('cnn.h5') SCREAMING_SNAKE_CASE__ : int = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) SCREAMING_SNAKE_CASE__ : List[Any] = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255) SCREAMING_SNAKE_CASE__ : List[str] = train_datagen.flow_from_directory( 'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary' ) SCREAMING_SNAKE_CASE__ : List[Any] = test_datagen.flow_from_directory( 'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary' ) classifier.fit_generator( training_set, steps_per_epoch=5, epochs=30, validation_data=test_set ) classifier.save('cnn.h5') # Part 3 - Making new predictions SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.keras.preprocessing.image.load_img( 'dataset/single_prediction/image.png', target_size=(64, 64) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.keras.preprocessing.image.img_to_array(test_image) SCREAMING_SNAKE_CASE__ : Optional[Any] = np.expand_dims(test_image, axis=0) SCREAMING_SNAKE_CASE__ : Union[str, Any] = classifier.predict(test_image) # training_set.class_indices if result[0][0] == 0: SCREAMING_SNAKE_CASE__ : Dict = 'Normal' if result[0][0] == 1: SCREAMING_SNAKE_CASE__ : Tuple = 'Abnormality detected'
48
import math def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> float: if ( not isinstance(_SCREAMING_SNAKE_CASE ,(int, float) ) or power_factor < -1 or power_factor > 1 ): raise ValueError("power_factor must be a valid float value between -1 and 1." ) return apparent_power * power_factor def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> float: if ( not isinstance(_SCREAMING_SNAKE_CASE ,(int, float) ) or power_factor < -1 or power_factor > 1 ): raise ValueError("power_factor must be a valid float value between -1 and 1." ) return apparent_power * math.sqrt(1 - power_factor**2 ) if __name__ == "__main__": import doctest doctest.testmod()
48
1
from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class UpperCamelCase__ : '''simple docstring''' def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=30 , UpperCamelCase__=2 , UpperCamelCase__=3 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=32 , UpperCamelCase__=2 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=10 , UpperCamelCase__=0.02 , UpperCamelCase__=3 , UpperCamelCase__=None , UpperCamelCase__=2 , ) -> Union[str, Any]: lowerCamelCase : int = parent lowerCamelCase : Optional[int] = batch_size lowerCamelCase : Any = image_size lowerCamelCase : List[str] = patch_size lowerCamelCase : Tuple = num_channels lowerCamelCase : List[Any] = is_training lowerCamelCase : List[str] = use_labels lowerCamelCase : int = hidden_size lowerCamelCase : Tuple = num_hidden_layers lowerCamelCase : Tuple = num_attention_heads lowerCamelCase : List[Any] = intermediate_size lowerCamelCase : Any = hidden_act lowerCamelCase : Dict = hidden_dropout_prob lowerCamelCase : Tuple = attention_probs_dropout_prob lowerCamelCase : Optional[Any] = type_sequence_label_size lowerCamelCase : Union[str, Any] = initializer_range lowerCamelCase : List[str] = scope lowerCamelCase : List[Any] = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) lowerCamelCase : List[str] = (image_size // patch_size) ** 2 lowerCamelCase : Dict = num_patches + 2 def _lowercase ( self ) -> Optional[Any]: lowerCamelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase : Optional[int] = None if self.use_labels: lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase : Tuple = self.get_config() return config, pixel_values, labels def _lowercase ( self ) -> Optional[int]: return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]: lowerCamelCase : List[str] = TFDeiTModel(config=UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]: lowerCamelCase : int = TFDeiTForMaskedImageModeling(config=UpperCamelCase__ ) lowerCamelCase : List[str] = model(UpperCamelCase__ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCamelCase : str = 1 lowerCamelCase : Optional[int] = TFDeiTForMaskedImageModeling(UpperCamelCase__ ) lowerCamelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase : Dict = model(UpperCamelCase__ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int: lowerCamelCase : Optional[Any] = self.type_sequence_label_size lowerCamelCase : Optional[int] = TFDeiTForImageClassification(UpperCamelCase__ ) lowerCamelCase : Tuple = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCamelCase : List[str] = 1 lowerCamelCase : str = TFDeiTForImageClassification(UpperCamelCase__ ) lowerCamelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase : Optional[int] = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _lowercase ( self ) -> str: lowerCamelCase : int = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase : Any = config_and_inputs lowerCamelCase : Dict = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class UpperCamelCase__ (lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase_ : List[Any] = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) lowerCamelCase_ : Optional[Any] = ( { """feature-extraction""": TFDeiTModel, """image-classification""": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) lowerCamelCase_ : Dict = False lowerCamelCase_ : Tuple = False lowerCamelCase_ : str = False lowerCamelCase_ : List[Any] = False def _lowercase ( self ) -> Union[str, Any]: lowerCamelCase : Optional[int] = TFDeiTModelTester(self ) lowerCamelCase : Any = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 ) def _lowercase ( self ) -> Optional[int]: self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use inputs_embeds" ) def _lowercase ( self ) -> int: pass def _lowercase ( self ) -> List[Any]: lowerCamelCase , lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : Optional[Any] = model_class(UpperCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) lowerCamelCase : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase__ , tf.keras.layers.Dense ) ) def _lowercase ( self ) -> Tuple: lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : Optional[Any] = model_class(UpperCamelCase__ ) lowerCamelCase : int = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase : Any = [*signature.parameters.keys()] lowerCamelCase : str = ["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def _lowercase ( self ) -> str: lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def _lowercase ( self ) -> Tuple: lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase__ ) def _lowercase ( self ) -> Optional[int]: lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ) -> List[str]: lowerCamelCase : Optional[Any] = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def _lowercase ( self ) -> Optional[Any]: for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : Any = TFDeiTModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def A ( ) -> List[str]: lowerCamelCase : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @cached_property def _lowercase ( self ) -> List[str]: return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" ) if is_vision_available() else None ) @slow def _lowercase ( self ) -> str: lowerCamelCase : int = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ) lowerCamelCase : str = self.default_image_processor lowerCamelCase : Optional[Any] = prepare_img() lowerCamelCase : Tuple = image_processor(images=UpperCamelCase__ , return_tensors="tf" ) # forward pass lowerCamelCase : Optional[int] = model(**UpperCamelCase__ ) # verify the logits lowerCamelCase : str = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) lowerCamelCase : Optional[Any] = tf.constant([-1.0266, 0.1912, -1.2861] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
48
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__) def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ) -> Any: lowerCamelCase : Any = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ("cls_token", "deit.embeddings.cls_token"), ("dist_token", "deit.embeddings.distillation_token"), ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"), ("pos_embed", "deit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "deit" from all keys that start with "deit" lowerCamelCase : Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ("norm.weight", "deit.layernorm.weight"), ("norm.bias", "deit.layernorm.bias"), ("head.weight", "cls_classifier.weight"), ("head.bias", "cls_classifier.bias"), ("head_dist.weight", "distillation_classifier.weight"), ("head_dist.bias", "distillation_classifier.bias"), ] ) return rename_keys def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ) -> str: for i in range(config.num_hidden_layers ): if base_model: lowerCamelCase : Optional[int] = "" else: lowerCamelCase : List[str] = "deit." # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCamelCase : List[str] = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) lowerCamelCase : Optional[int] = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict lowerCamelCase : List[Any] = in_proj_weight[ : config.hidden_size, : ] lowerCamelCase : Any = in_proj_bias[: config.hidden_size] lowerCamelCase : List[str] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCamelCase : Optional[Any] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCamelCase : List[str] = in_proj_weight[ -config.hidden_size :, : ] lowerCamelCase : List[Any] = in_proj_bias[-config.hidden_size :] def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> str: lowerCamelCase : List[str] = dct.pop(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Any = val def A ( ) -> List[str]: lowerCamelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" lowerCamelCase : str = Image.open(requests.get(_SCREAMING_SNAKE_CASE ,stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]: lowerCamelCase : Union[str, Any] = DeiTConfig() # all deit models have fine-tuned heads lowerCamelCase : Optional[int] = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size lowerCamelCase : Dict = 1000 lowerCamelCase : Tuple = "huggingface/label-files" lowerCamelCase : List[str] = "imagenet-1k-id2label.json" lowerCamelCase : List[Any] = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,repo_type="dataset" ) ,"r" ) ) lowerCamelCase : Optional[int] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowerCamelCase : Tuple = idalabel lowerCamelCase : str = {v: k for k, v in idalabel.items()} lowerCamelCase : Dict = int(deit_name[-6:-4] ) lowerCamelCase : Optional[Any] = int(deit_name[-3:] ) # size of the architecture if deit_name[9:].startswith("tiny" ): lowerCamelCase : Optional[Any] = 192 lowerCamelCase : List[str] = 768 lowerCamelCase : Tuple = 12 lowerCamelCase : Optional[Any] = 3 elif deit_name[9:].startswith("small" ): lowerCamelCase : str = 384 lowerCamelCase : Optional[Any] = 1536 lowerCamelCase : Dict = 12 lowerCamelCase : Optional[int] = 6 if deit_name[9:].startswith("base" ): pass elif deit_name[4:].startswith("large" ): lowerCamelCase : str = 1024 lowerCamelCase : List[str] = 4096 lowerCamelCase : Any = 24 lowerCamelCase : Dict = 16 # load original model from timm lowerCamelCase : List[Any] = timm.create_model(_SCREAMING_SNAKE_CASE ,pretrained=_SCREAMING_SNAKE_CASE ) timm_model.eval() # load state_dict of original model, remove and rename some keys lowerCamelCase : Dict = timm_model.state_dict() lowerCamelCase : Dict = create_rename_keys(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) read_in_q_k_v(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # load HuggingFace model lowerCamelCase : Optional[Any] = DeiTForImageClassificationWithTeacher(_SCREAMING_SNAKE_CASE ).eval() model.load_state_dict(_SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by DeiTImageProcessor lowerCamelCase : Any = int( (256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 lowerCamelCase : Union[str, Any] = DeiTImageProcessor(size=_SCREAMING_SNAKE_CASE ,crop_size=config.image_size ) lowerCamelCase : str = image_processor(images=prepare_img() ,return_tensors="pt" ) lowerCamelCase : int = encoding["pixel_values"] lowerCamelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Union[str, Any] = timm_model(_SCREAMING_SNAKE_CASE ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_SCREAMING_SNAKE_CASE ,outputs.logits ,atol=1e-3 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(f'''Saving model {deit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--deit_name', default='vit_deit_base_distilled_patch16_224', type=str, help='Name of the DeiT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
48
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available SCREAMING_SNAKE_CASE__ : Union[str, Any] = { 'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : List[Any] = [ 'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'GraphormerForGraphClassification', 'GraphormerModel', 'GraphormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_graphormer import ( GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST, GraphormerForGraphClassification, GraphormerModel, GraphormerPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
48
import random def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> tuple: lowerCamelCase , lowerCamelCase , lowerCamelCase : Any = [], [], [] for element in data: if element < pivot: less.append(_SCREAMING_SNAKE_CASE ) elif element > pivot: greater.append(_SCREAMING_SNAKE_CASE ) else: equal.append(_SCREAMING_SNAKE_CASE ) return less, equal, greater def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> str: # index = len(items) // 2 when trying to find the median # (value of index when items is sorted) # invalid input if index >= len(_SCREAMING_SNAKE_CASE ) or index < 0: return None lowerCamelCase : List[Any] = items[random.randint(0 ,len(_SCREAMING_SNAKE_CASE ) - 1 )] lowerCamelCase : Dict = 0 lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = _partition(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) lowerCamelCase : Union[str, Any] = len(_SCREAMING_SNAKE_CASE ) lowerCamelCase : str = len(_SCREAMING_SNAKE_CASE ) # index is the pivot if m <= index < m + count: return pivot # must be in smaller elif m > index: return quick_select(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # must be in larger else: return quick_select(_SCREAMING_SNAKE_CASE ,index - (m + count) )
48
1
import argparse import torch # Step 1. clone https://github.com/microsoft/unilm # Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd # Step 3. cd unilm # Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink # import classes from unilm.wavlm.WavLM import WavLM as WavLMOrig from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig from transformers import WavLMConfig, WavLMModel, logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Union[str, Any] = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear', 'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed', 'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'ctc_proj', 'mask_emb': 'masked_spec_embed', } SCREAMING_SNAKE_CASE__ : Optional[int] = [ 'ctc_proj', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[int]: for attribute in key.split("." ): lowerCamelCase : Optional[Any] = getattr(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) if weight_type is not None: lowerCamelCase : List[Any] = getattr(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).shape else: lowerCamelCase : List[str] = hf_pointer.shape assert hf_shape == value.shape, ( f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": lowerCamelCase : str = value elif weight_type == "weight_g": lowerCamelCase : Optional[int] = value elif weight_type == "weight_v": lowerCamelCase : Dict = value elif weight_type == "bias": lowerCamelCase : Optional[int] = value else: lowerCamelCase : Tuple = value logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> str: lowerCamelCase : Optional[Any] = [] lowerCamelCase : Tuple = fairseq_model.state_dict() lowerCamelCase : int = hf_model.feature_extractor for name, value in fairseq_dict.items(): lowerCamelCase : str = False if "conv_layers" in name: load_conv_layer( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,hf_model.config.feat_extract_norm == "group" ,) lowerCamelCase : Dict = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: lowerCamelCase : List[str] = True if "*" in mapped_key: lowerCamelCase : int = name.split(_SCREAMING_SNAKE_CASE )[0].split("." )[-2] lowerCamelCase : Tuple = mapped_key.replace("*" ,_SCREAMING_SNAKE_CASE ) if "weight_g" in name: lowerCamelCase : List[Any] = "weight_g" elif "weight_v" in name: lowerCamelCase : Tuple = "weight_v" elif "bias" in name and "relative_attention_bias" not in name: lowerCamelCase : List[str] = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj lowerCamelCase : Tuple = "weight" else: lowerCamelCase : Dict = None set_recursively(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) continue if not is_used: unused_weights.append(_SCREAMING_SNAKE_CASE ) logger.warning(f'''Unused weights: {unused_weights}''' ) def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Tuple: lowerCamelCase : List[Any] = full_name.split("conv_layers." )[-1] lowerCamelCase : Union[str, Any] = name.split("." ) lowerCamelCase : Optional[Any] = int(items[0] ) lowerCamelCase : Dict = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) lowerCamelCase : Tuple = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) lowerCamelCase : Optional[Any] = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." ) lowerCamelCase : Optional[Any] = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) lowerCamelCase : List[Any] = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(_SCREAMING_SNAKE_CASE ) @torch.no_grad() def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ) -> Tuple: # load the pre-trained checkpoints lowerCamelCase : Optional[int] = torch.load(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Optional[int] = WavLMConfigOrig(checkpoint["cfg"] ) lowerCamelCase : Any = WavLMOrig(_SCREAMING_SNAKE_CASE ) model.load_state_dict(checkpoint["model"] ) model.eval() if config_path is not None: lowerCamelCase : Any = WavLMConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) else: lowerCamelCase : Dict = WavLMConfig() lowerCamelCase : Optional[int] = WavLMModel(_SCREAMING_SNAKE_CASE ) recursively_load_weights(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) hf_wavlm.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args() convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
48
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> int: return x if y == 0 else greatest_common_divisor(_SCREAMING_SNAKE_CASE ,x % y ) def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> int: return (x * y) // greatest_common_divisor(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) def A ( _SCREAMING_SNAKE_CASE = 20 ) -> int: lowerCamelCase : List[Any] = 1 for i in range(1 ,n + 1 ): lowerCamelCase : List[str] = lcm(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) return g if __name__ == "__main__": print(f'''{solution() = }''')
48
1
import argparse import os from pathlib import Path import torch from bark.generation import _load_model as _bark_load_model from huggingface_hub import hf_hub_download from transformers import EncodecConfig, EncodecModel, set_seed from transformers.models.bark.configuration_bark import ( BarkCoarseConfig, BarkConfig, BarkFineConfig, BarkSemanticConfig, ) from transformers.models.bark.generation_configuration_bark import ( BarkCoarseGenerationConfig, BarkFineGenerationConfig, BarkGenerationConfig, BarkSemanticGenerationConfig, ) from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__) set_seed(770) SCREAMING_SNAKE_CASE__ : Tuple = { 'c_attn': 'att_proj', 'c_proj': 'out_proj', 'c_fc': 'in_proj', 'transformer.': '', 'h.': 'layers.', 'ln_1': 'layernorm_1', 'ln_2': 'layernorm_2', 'ln_f': 'layernorm_final', 'wpe': 'position_embeds_layer', 'wte': 'input_embeds_layer', } SCREAMING_SNAKE_CASE__ : List[Any] = { 'text_small': { 'repo_id': 'suno/bark', 'file_name': 'text.pt', }, 'coarse_small': { 'repo_id': 'suno/bark', 'file_name': 'coarse.pt', }, 'fine_small': { 'repo_id': 'suno/bark', 'file_name': 'fine.pt', }, 'text': { 'repo_id': 'suno/bark', 'file_name': 'text_2.pt', }, 'coarse': { 'repo_id': 'suno/bark', 'file_name': 'coarse_2.pt', }, 'fine': { 'repo_id': 'suno/bark', 'file_name': 'fine_2.pt', }, } SCREAMING_SNAKE_CASE__ : List[Any] = os.path.dirname(os.path.abspath(__file__)) SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(os.path.expanduser('~'), '.cache') SCREAMING_SNAKE_CASE__ : str = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0') def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ) -> Any: lowerCamelCase : List[str] = model_type if use_small: key += "_small" return os.path.join(_SCREAMING_SNAKE_CASE ,REMOTE_MODEL_PATHS[key]["file_name"] ) def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]: os.makedirs(_SCREAMING_SNAKE_CASE ,exist_ok=_SCREAMING_SNAKE_CASE ) hf_hub_download(repo_id=_SCREAMING_SNAKE_CASE ,filename=_SCREAMING_SNAKE_CASE ,local_dir=_SCREAMING_SNAKE_CASE ) def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE="text" ) -> Optional[int]: if model_type == "text": lowerCamelCase : Optional[int] = BarkSemanticModel lowerCamelCase : int = BarkSemanticConfig lowerCamelCase : Any = BarkSemanticGenerationConfig elif model_type == "coarse": lowerCamelCase : Optional[Any] = BarkCoarseModel lowerCamelCase : List[str] = BarkCoarseConfig lowerCamelCase : str = BarkCoarseGenerationConfig elif model_type == "fine": lowerCamelCase : Any = BarkFineModel lowerCamelCase : List[Any] = BarkFineConfig lowerCamelCase : Union[str, Any] = BarkFineGenerationConfig else: raise NotImplementedError() lowerCamelCase : int = f'''{model_type}_small''' if use_small else model_type lowerCamelCase : str = REMOTE_MODEL_PATHS[model_key] if not os.path.exists(_SCREAMING_SNAKE_CASE ): logger.info(f'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' ) _download(model_info["repo_id"] ,model_info["file_name"] ) lowerCamelCase : Tuple = torch.load(_SCREAMING_SNAKE_CASE ,map_location=_SCREAMING_SNAKE_CASE ) # this is a hack lowerCamelCase : List[Any] = checkpoint["model_args"] if "input_vocab_size" not in model_args: lowerCamelCase : Optional[int] = model_args["vocab_size"] lowerCamelCase : Dict = model_args["vocab_size"] del model_args["vocab_size"] # convert Bark model arguments to HF Bark model arguments lowerCamelCase : Union[str, Any] = model_args.pop("n_head" ) lowerCamelCase : List[Any] = model_args.pop("n_embd" ) lowerCamelCase : List[Any] = model_args.pop("n_layer" ) lowerCamelCase : int = ConfigClass(**checkpoint["model_args"] ) lowerCamelCase : Optional[Any] = ModelClass(config=_SCREAMING_SNAKE_CASE ) lowerCamelCase : List[str] = GenerationConfigClass() lowerCamelCase : Dict = model_generation_config lowerCamelCase : Optional[Any] = checkpoint["model"] # fixup checkpoint lowerCamelCase : List[str] = "_orig_mod." for k, v in list(state_dict.items() ): if k.startswith(_SCREAMING_SNAKE_CASE ): # replace part of the key with corresponding layer name in HF implementation lowerCamelCase : Union[str, Any] = k[len(_SCREAMING_SNAKE_CASE ) :] for old_layer_name in new_layer_name_dict: lowerCamelCase : List[Any] = new_k.replace(_SCREAMING_SNAKE_CASE ,new_layer_name_dict[old_layer_name] ) lowerCamelCase : int = state_dict.pop(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Any = set(state_dict.keys() ) - set(model.state_dict().keys() ) lowerCamelCase : Dict = {k for k in extra_keys if not k.endswith(".attn.bias" )} lowerCamelCase : Optional[Any] = set(model.state_dict().keys() ) - set(state_dict.keys() ) lowerCamelCase : Tuple = {k for k in missing_keys if not k.endswith(".attn.bias" )} if len(_SCREAMING_SNAKE_CASE ) != 0: raise ValueError(f'''extra keys found: {extra_keys}''' ) if len(_SCREAMING_SNAKE_CASE ) != 0: raise ValueError(f'''missing keys: {missing_keys}''' ) model.load_state_dict(_SCREAMING_SNAKE_CASE ,strict=_SCREAMING_SNAKE_CASE ) lowerCamelCase : Any = model.num_parameters(exclude_embeddings=_SCREAMING_SNAKE_CASE ) lowerCamelCase : List[str] = checkpoint["best_val_loss"].item() logger.info(f'''model loaded: {round(n_params/1e6 ,1 )}M params, {round(_SCREAMING_SNAKE_CASE ,3 )} loss''' ) model.eval() model.to(_SCREAMING_SNAKE_CASE ) del checkpoint, state_dict return model def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE="text" ) -> Optional[int]: if model_type not in ("text", "coarse", "fine"): raise NotImplementedError() lowerCamelCase : Optional[Any] = "cpu" # do conversion on cpu lowerCamelCase : Tuple = _get_ckpt_path(_SCREAMING_SNAKE_CASE ,use_small=_SCREAMING_SNAKE_CASE ) lowerCamelCase : Optional[Any] = _load_model(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,model_type=_SCREAMING_SNAKE_CASE ,use_small=_SCREAMING_SNAKE_CASE ) # load bark initial model lowerCamelCase : Optional[int] = _bark_load_model(_SCREAMING_SNAKE_CASE ,"cpu" ,model_type=_SCREAMING_SNAKE_CASE ,use_small=_SCREAMING_SNAKE_CASE ) if model_type == "text": lowerCamelCase : int = bark_model["model"] if model.num_parameters(exclude_embeddings=_SCREAMING_SNAKE_CASE ) != bark_model.get_num_params(): raise ValueError("initial and new models don't have the same number of parameters" ) # check if same output as the bark model lowerCamelCase : Optional[int] = 5 lowerCamelCase : int = 10 if model_type in ["text", "coarse"]: lowerCamelCase : Union[str, Any] = torch.randint(256 ,(batch_size, sequence_length) ,dtype=torch.int ) lowerCamelCase : Tuple = bark_model(_SCREAMING_SNAKE_CASE )[0] lowerCamelCase : Tuple = model(_SCREAMING_SNAKE_CASE ) # take last logits lowerCamelCase : str = output_new_model_total.logits[:, [-1], :] else: lowerCamelCase : str = 3 lowerCamelCase : Union[str, Any] = 8 lowerCamelCase : Optional[int] = torch.randint(256 ,(batch_size, sequence_length, n_codes_total) ,dtype=torch.int ) lowerCamelCase : int = model(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) lowerCamelCase : List[str] = bark_model(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) lowerCamelCase : str = output_new_model_total.logits # output difference should come from the difference of self-attention implementation design if output_new_model.shape != output_old_model.shape: raise ValueError("initial and new outputs don't have the same shape" ) if (output_new_model - output_old_model).abs().max().item() > 1e-3: raise ValueError("initial and new outputs are not equal" ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> List[str]: lowerCamelCase : Dict = os.path.join(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) lowerCamelCase : int = BarkSemanticConfig.from_pretrained(os.path.join(_SCREAMING_SNAKE_CASE ,"config.json" ) ) lowerCamelCase : Optional[int] = BarkCoarseConfig.from_pretrained(os.path.join(_SCREAMING_SNAKE_CASE ,"config.json" ) ) lowerCamelCase : Tuple = BarkFineConfig.from_pretrained(os.path.join(_SCREAMING_SNAKE_CASE ,"config.json" ) ) lowerCamelCase : Tuple = EncodecConfig.from_pretrained("facebook/encodec_24khz" ) lowerCamelCase : str = BarkSemanticModel.from_pretrained(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Union[str, Any] = BarkCoarseModel.from_pretrained(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Union[str, Any] = BarkFineModel.from_pretrained(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Union[str, Any] = EncodecModel.from_pretrained("facebook/encodec_24khz" ) lowerCamelCase : Union[str, Any] = BarkConfig.from_sub_model_configs( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) lowerCamelCase : Union[str, Any] = BarkGenerationConfig.from_sub_model_configs( semantic.generation_config ,coarseAcoustic.generation_config ,fineAcoustic.generation_config ) lowerCamelCase : List[str] = BarkModel(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Union[str, Any] = semantic lowerCamelCase : Optional[Any] = coarseAcoustic lowerCamelCase : Union[str, Any] = fineAcoustic lowerCamelCase : str = codec lowerCamelCase : Tuple = bark_generation_config Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) bark.save_pretrained(_SCREAMING_SNAKE_CASE ,repo_id=_SCREAMING_SNAKE_CASE ,push_to_hub=_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument('model_type', type=str, help='text, coarse or fine.') parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.') SCREAMING_SNAKE_CASE__ : str = parser.parse_args() load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
48
import os import tempfile import unittest from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter from transformers.testing_utils import slow from transformers.utils import cached_property @unittest.skipUnless(os.path.exists(lowerCAmelCase__ ) , """Tatoeba directory does not exist.""" ) class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @cached_property def _lowercase ( self ) -> int: lowerCamelCase : str = tempfile.mkdtemp() return TatoebaConverter(save_dir=UpperCamelCase__ ) @slow def _lowercase ( self ) -> List[Any]: self.resolver.convert_models(["heb-eng"] ) @slow def _lowercase ( self ) -> Tuple: lowerCamelCase , lowerCamelCase : Dict = self.resolver.write_model_card("opus-mt-he-en" , dry_run=UpperCamelCase__ ) assert mmeta["long_pair"] == "heb-eng"
48
1
import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[int]: lowerCamelCase : Optional[Any] = 1.5 lowerCamelCase : Optional[int] = int(factor * num_class_images ) lowerCamelCase : str = ClipClient( url="https://knn.laion.ai/knn-service" ,indice_name="laion_400m" ,num_images=_SCREAMING_SNAKE_CASE ,aesthetic_weight=0.1 ) os.makedirs(f'''{class_data_dir}/images''' ,exist_ok=_SCREAMING_SNAKE_CASE ) if len(list(Path(f'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images: return while True: lowerCamelCase : List[Any] = client.query(text=_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) >= factor * num_class_images or num_images > 1e4: break else: lowerCamelCase : Any = int(factor * num_images ) lowerCamelCase : int = ClipClient( url="https://knn.laion.ai/knn-service" ,indice_name="laion_400m" ,num_images=_SCREAMING_SNAKE_CASE ,aesthetic_weight=0.1 ,) lowerCamelCase : Any = 0 lowerCamelCase : str = 0 lowerCamelCase : Dict = tqdm(desc="downloading real regularization images" ,total=_SCREAMING_SNAKE_CASE ) with open(f'''{class_data_dir}/caption.txt''' ,"w" ) as fa, open(f'''{class_data_dir}/urls.txt''' ,"w" ) as fa, open( f'''{class_data_dir}/images.txt''' ,"w" ) as fa: while total < num_class_images: lowerCamelCase : str = class_images[count] count += 1 try: lowerCamelCase : Union[str, Any] = requests.get(images["url"] ) if img.status_code == 200: lowerCamelCase : Dict = Image.open(BytesIO(img.content ) ) with open(f'''{class_data_dir}/images/{total}.jpg''' ,"wb" ) as f: f.write(img.content ) fa.write(images["caption"] + "\n" ) fa.write(images["url"] + "\n" ) fa.write(f'''{class_data_dir}/images/{total}.jpg''' + "\n" ) total += 1 pbar.update(1 ) else: continue except Exception: continue return def A ( ) -> List[Any]: lowerCamelCase : Dict = argparse.ArgumentParser("" ,add_help=_SCREAMING_SNAKE_CASE ) parser.add_argument("--class_prompt" ,help="text prompt to retrieve images" ,required=_SCREAMING_SNAKE_CASE ,type=_SCREAMING_SNAKE_CASE ) parser.add_argument("--class_data_dir" ,help="path to save images" ,required=_SCREAMING_SNAKE_CASE ,type=_SCREAMING_SNAKE_CASE ) parser.add_argument("--num_class_images" ,help="number of images to download" ,default=200 ,type=_SCREAMING_SNAKE_CASE ) return parser.parse_args() if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : List[str] = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
48
import argparse from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta from transformers.utils import logging logging.set_verbosity_info() def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Dict: # Initialise PyTorch model lowerCamelCase : Any = TaConfig.from_json_file(_SCREAMING_SNAKE_CASE ) print(f'''Building PyTorch model from configuration: {config}''' ) lowerCamelCase : str = TaForConditionalGeneration(_SCREAMING_SNAKE_CASE ) # Load weights from tf checkpoint load_tf_weights_in_ta(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # Save pytorch-model print(f'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) SCREAMING_SNAKE_CASE__ : str = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
48
1
import random def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> tuple: lowerCamelCase , lowerCamelCase , lowerCamelCase : Any = [], [], [] for element in data: if element < pivot: less.append(_SCREAMING_SNAKE_CASE ) elif element > pivot: greater.append(_SCREAMING_SNAKE_CASE ) else: equal.append(_SCREAMING_SNAKE_CASE ) return less, equal, greater def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> str: # index = len(items) // 2 when trying to find the median # (value of index when items is sorted) # invalid input if index >= len(_SCREAMING_SNAKE_CASE ) or index < 0: return None lowerCamelCase : List[Any] = items[random.randint(0 ,len(_SCREAMING_SNAKE_CASE ) - 1 )] lowerCamelCase : Dict = 0 lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = _partition(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) lowerCamelCase : Union[str, Any] = len(_SCREAMING_SNAKE_CASE ) lowerCamelCase : str = len(_SCREAMING_SNAKE_CASE ) # index is the pivot if m <= index < m + count: return pivot # must be in smaller elif m > index: return quick_select(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # must be in larger else: return quick_select(_SCREAMING_SNAKE_CASE ,index - (m + count) )
48
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, is_vision_available, ) SCREAMING_SNAKE_CASE__ : List[Any] = {'processing_layoutxlm': ['LayoutXLMProcessor']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Optional[Any] = ['LayoutXLMTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Dict = ['LayoutXLMTokenizerFast'] if TYPE_CHECKING: from .processing_layoutxlm import LayoutXLMProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm import LayoutXLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast else: import sys SCREAMING_SNAKE_CASE__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
48
1
import contextlib import os import sqlitea import pytest from datasets import Dataset, Features, Value from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> str: assert isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @require_sqlalchemy @pytest.mark.parametrize("keep_in_memory" ,[False, True] ) def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Tuple: lowerCamelCase : Tuple = tmp_path / "cache" lowerCamelCase : Dict = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCamelCase : Union[str, Any] = SqlDatasetReader( "dataset" ,"sqlite:///" + sqlite_path ,cache_dir=_SCREAMING_SNAKE_CASE ,keep_in_memory=_SCREAMING_SNAKE_CASE ).read() _check_sql_dataset(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) @require_sqlalchemy @pytest.mark.parametrize( "features" ,[ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] ,) def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> str: lowerCamelCase : Tuple = tmp_path / "cache" lowerCamelCase : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"} lowerCamelCase : Optional[Any] = features.copy() if features else default_expected_features lowerCamelCase : int = ( Features({feature: Value(_SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCamelCase : Optional[int] = SqlDatasetReader("dataset" ,"sqlite:///" + sqlite_path ,features=_SCREAMING_SNAKE_CASE ,cache_dir=_SCREAMING_SNAKE_CASE ).read() _check_sql_dataset(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) def A ( _SCREAMING_SNAKE_CASE ) -> Dict: with contextlib.closing(sqlitea.connect(_SCREAMING_SNAKE_CASE ) ) as con: lowerCamelCase : List[str] = con.cursor() cur.execute("SELECT * FROM dataset" ) for row in cur: yield row @require_sqlalchemy def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> List[str]: lowerCamelCase : str = tmp_path / "cache" lowerCamelCase : str = os.path.join(_SCREAMING_SNAKE_CASE ,"tmp.sql" ) lowerCamelCase : Optional[int] = SqlDatasetReader("dataset" ,"sqlite:///" + sqlite_path ,cache_dir=_SCREAMING_SNAKE_CASE ).read() SqlDatasetWriter(_SCREAMING_SNAKE_CASE ,"dataset" ,"sqlite:///" + output_sqlite_path ,num_proc=1 ).write() lowerCamelCase : Optional[Any] = iter_sql_file(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Union[str, Any] = iter_sql_file(_SCREAMING_SNAKE_CASE ) for rowa, rowa in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): assert rowa == rowa @require_sqlalchemy def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]: lowerCamelCase : Optional[Any] = tmp_path / "cache" lowerCamelCase : Optional[int] = os.path.join(_SCREAMING_SNAKE_CASE ,"tmp.sql" ) lowerCamelCase : List[str] = SqlDatasetReader("dataset" ,"sqlite:///" + sqlite_path ,cache_dir=_SCREAMING_SNAKE_CASE ).read() SqlDatasetWriter(_SCREAMING_SNAKE_CASE ,"dataset" ,"sqlite:///" + output_sqlite_path ,num_proc=2 ).write() lowerCamelCase : List[str] = iter_sql_file(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Any = iter_sql_file(_SCREAMING_SNAKE_CASE ) for rowa, rowa in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): assert rowa == rowa @require_sqlalchemy def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> List[Any]: lowerCamelCase : List[Any] = tmp_path / "cache" lowerCamelCase : List[str] = os.path.join(_SCREAMING_SNAKE_CASE ,"tmp.sql" ) lowerCamelCase : Optional[int] = SqlDatasetReader("dataset" ,"sqlite:///" + sqlite_path ,cache_dir=_SCREAMING_SNAKE_CASE ).read() with pytest.raises(_SCREAMING_SNAKE_CASE ): SqlDatasetWriter(_SCREAMING_SNAKE_CASE ,"dataset" ,"sqlite:///" + output_sqlite_path ,num_proc=0 ).write()
48
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> list: lowerCamelCase : Dict = len(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Union[str, Any] = [] for i in range(len(_SCREAMING_SNAKE_CASE ) - pat_len + 1 ): lowerCamelCase : Dict = True for j in range(_SCREAMING_SNAKE_CASE ): if s[i + j] != pattern[j]: lowerCamelCase : Optional[int] = False break if match_found: position.append(_SCREAMING_SNAKE_CASE ) return position if __name__ == "__main__": assert naive_pattern_search('ABCDEFG', 'DE') == [3] print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
48
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE__ : Tuple = {'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Optional[int] = [ 'SEW_PRETRAINED_MODEL_ARCHIVE_LIST', 'SEWForCTC', 'SEWForSequenceClassification', 'SEWModel', 'SEWPreTrainedModel', ] if TYPE_CHECKING: from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_sew import ( SEW_PRETRAINED_MODEL_ARCHIVE_LIST, SEWForCTC, SEWForSequenceClassification, SEWModel, SEWPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
48
from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE__ : Optional[Any] = {'configuration_mmbt': ['MMBTConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : List[Any] = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings'] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys SCREAMING_SNAKE_CASE__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
48
1
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) SCREAMING_SNAKE_CASE__ : Optional[Any] = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight''')) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias''')) rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight''')) rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias''')) rename_keys.append( (f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias''')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''', f'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''', f'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight''')) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias''')) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight''')) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias''')) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias''')) rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias''')) rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias''')) rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ('input_proj.weight', 'input_projection.weight'), ('input_proj.bias', 'input_projection.bias'), ('query_embed.weight', 'query_position_embeddings.weight'), ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'), ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'), ('class_embed.weight', 'class_labels_classifier.weight'), ('class_embed.bias', 'class_labels_classifier.bias'), ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'), ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'), ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'), ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'), ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'), ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'), ('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'), ('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'), ('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'), ('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'), ('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'), ('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'), ('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'), ('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'), ('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'), ('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'), ] ) def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Dict: lowerCamelCase : List[str] = state_dict.pop(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Tuple = val def A ( _SCREAMING_SNAKE_CASE ) -> Optional[int]: lowerCamelCase : Optional[Any] = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: lowerCamelCase : Optional[int] = key.replace("backbone.0.body" ,"backbone.conv_encoder.model" ) lowerCamelCase : Optional[int] = value else: lowerCamelCase : List[Any] = value return new_state_dict def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ) -> List[str]: lowerCamelCase : int = "" if is_panoptic: lowerCamelCase : Dict = "conditional_detr." # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) lowerCamelCase : List[Any] = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' ) lowerCamelCase : Any = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict lowerCamelCase : List[Any] = in_proj_weight[:256, :] lowerCamelCase : int = in_proj_bias[:256] lowerCamelCase : Optional[int] = in_proj_weight[256:512, :] lowerCamelCase : int = in_proj_bias[256:512] lowerCamelCase : Any = in_proj_weight[-256:, :] lowerCamelCase : Dict = in_proj_bias[-256:] def A ( ) -> List[str]: lowerCamelCase : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg" lowerCamelCase : int = Image.open(requests.get(_SCREAMING_SNAKE_CASE ,stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Dict: lowerCamelCase : Optional[Any] = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: lowerCamelCase : str = "resnet101" if "dc5" in model_name: lowerCamelCase : int = True lowerCamelCase : int = "panoptic" in model_name if is_panoptic: lowerCamelCase : List[str] = 250 else: lowerCamelCase : int = 91 lowerCamelCase : Tuple = "huggingface/label-files" lowerCamelCase : Optional[Any] = "coco-detection-id2label.json" lowerCamelCase : Dict = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,repo_type="dataset" ) ,"r" ) ) lowerCamelCase : Optional[int] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowerCamelCase : Optional[Any] = idalabel lowerCamelCase : str = {v: k for k, v in idalabel.items()} # load image processor lowerCamelCase : Tuple = "coco_panoptic" if is_panoptic else "coco_detection" lowerCamelCase : List[str] = ConditionalDetrImageProcessor(format=_SCREAMING_SNAKE_CASE ) # prepare image lowerCamelCase : List[str] = prepare_img() lowerCamelCase : Union[str, Any] = image_processor(images=_SCREAMING_SNAKE_CASE ,return_tensors="pt" ) lowerCamelCase : int = encoding["pixel_values"] logger.info(f'''Converting model {model_name}...''' ) # load original model from torch hub lowerCamelCase : Tuple = torch.hub.load("DeppMeng/ConditionalDETR" ,_SCREAMING_SNAKE_CASE ,pretrained=_SCREAMING_SNAKE_CASE ).eval() lowerCamelCase : List[Any] = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: lowerCamelCase : Optional[Any] = "conditional_detr." + src rename_key(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) lowerCamelCase : Dict = rename_backbone_keys(_SCREAMING_SNAKE_CASE ) # query, key and value matrices need special treatment read_in_q_k_v(_SCREAMING_SNAKE_CASE ,is_panoptic=_SCREAMING_SNAKE_CASE ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them lowerCamelCase : Optional[int] = "conditional_detr.model." if is_panoptic else "model." for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("conditional_detr" ) and not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ) ): lowerCamelCase : Dict = state_dict.pop(_SCREAMING_SNAKE_CASE ) lowerCamelCase : int = val elif "class_labels_classifier" in key or "bbox_predictor" in key: lowerCamelCase : Optional[int] = state_dict.pop(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Dict = val elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ): continue else: lowerCamelCase : str = state_dict.pop(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Dict = val else: if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): lowerCamelCase : Dict = state_dict.pop(_SCREAMING_SNAKE_CASE ) lowerCamelCase : List[str] = val # finally, create HuggingFace model and load state dict lowerCamelCase : List[str] = ConditionalDetrForSegmentation(_SCREAMING_SNAKE_CASE ) if is_panoptic else ConditionalDetrForObjectDetection(_SCREAMING_SNAKE_CASE ) model.load_state_dict(_SCREAMING_SNAKE_CASE ) model.eval() model.push_to_hub(repo_id=_SCREAMING_SNAKE_CASE ,organization="DepuMeng" ,commit_message="Add model" ) # verify our conversion lowerCamelCase : Any = conditional_detr(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE ) assert torch.allclose(outputs.logits ,original_outputs["pred_logits"] ,atol=1e-4 ) assert torch.allclose(outputs.pred_boxes ,original_outputs["pred_boxes"] ,atol=1e-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks ,original_outputs["pred_masks"] ,atol=1e-4 ) # Save model and image processor logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : List[Any] = argparse.ArgumentParser() parser.add_argument( '--model_name', default='conditional_detr_resnet50', type=str, help='Name of the CONDITIONAL_DETR model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) SCREAMING_SNAKE_CASE__ : Optional[Any] = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
48
import numpy as np from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor def A ( _SCREAMING_SNAKE_CASE ) -> tuple: return (data["data"], data["target"]) def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> np.ndarray: lowerCamelCase : List[str] = XGBRegressor(verbosity=0 ,random_state=42 ) xgb.fit(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # Predict target for test data lowerCamelCase : List[Any] = xgb.predict(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Tuple = predictions.reshape(len(_SCREAMING_SNAKE_CASE ) ,1 ) return predictions def A ( ) -> None: lowerCamelCase : Dict = fetch_california_housing() lowerCamelCase , lowerCamelCase : Tuple = data_handling(_SCREAMING_SNAKE_CASE ) lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[Any] = train_test_split( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,test_size=0.25 ,random_state=1 ) lowerCamelCase : Any = xgboost(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # Error printing print(f'''Mean Absolute Error : {mean_absolute_error(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )}''' ) print(f'''Mean Square Error : {mean_squared_error(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )}''' ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
48
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE__ : int = { 'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Optional[Any] = [ 'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST', 'PegasusXForConditionalGeneration', 'PegasusXModel', 'PegasusXPreTrainedModel', ] if TYPE_CHECKING: from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pegasus_x import ( PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST, PegasusXForConditionalGeneration, PegasusXModel, PegasusXPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
48
from math import sqrt def A ( _SCREAMING_SNAKE_CASE = 100_0000 ) -> int: lowerCamelCase : int = 0 lowerCamelCase : int = 0 lowerCamelCase : int while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 ,2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(_SCREAMING_SNAKE_CASE ,sum_shortest_sides // 2 ) - max(1 ,sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(f'''{solution() = }''')
48
1
from math import sqrt def A ( _SCREAMING_SNAKE_CASE = 100_0000 ) -> int: lowerCamelCase : int = 0 lowerCamelCase : int = 0 lowerCamelCase : int while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 ,2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(_SCREAMING_SNAKE_CASE ,sum_shortest_sides // 2 ) - max(1 ,sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(f'''{solution() = }''')
48
import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors SCREAMING_SNAKE_CASE__ : Dict = logging.getLogger(__name__) class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Optional[int] = """sequence-classification""" def __init__( self , UpperCamelCase__ ) -> List[Any]: if type(UpperCamelCase__ ) == dict: lowerCamelCase : int = Namespace(**UpperCamelCase__ ) lowerCamelCase : str = glue_output_modes[hparams.task] lowerCamelCase : int = glue_tasks_num_labels[hparams.task] super().__init__(UpperCamelCase__ , UpperCamelCase__ , self.mode ) def _lowercase ( self , **UpperCamelCase__ ) -> Tuple: return self.model(**UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple: lowerCamelCase : Union[str, Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: lowerCamelCase : List[str] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None lowerCamelCase : Optional[int] = self(**UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = outputs[0] lowerCamelCase : str = self.trainer.lr_schedulers[0]["scheduler"] lowerCamelCase : Optional[int] = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def _lowercase ( self ) -> str: lowerCamelCase : Any = self.hparams lowerCamelCase : Union[str, Any] = processors[args.task]() lowerCamelCase : Optional[int] = processor.get_labels() for mode in ["train", "dev"]: lowerCamelCase : Optional[Any] = self._feature_file(UpperCamelCase__ ) if os.path.exists(UpperCamelCase__ ) and not args.overwrite_cache: logger.info("Loading features from cached file %s" , UpperCamelCase__ ) else: logger.info("Creating features from dataset file at %s" , args.data_dir ) lowerCamelCase : List[str] = ( processor.get_dev_examples(args.data_dir ) if mode == "dev" else processor.get_train_examples(args.data_dir ) ) lowerCamelCase : Dict = convert_examples_to_features( UpperCamelCase__ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info("Saving features into cached file %s" , UpperCamelCase__ ) torch.save(UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False ) -> DataLoader: lowerCamelCase : str = "dev" if mode == "test" else mode lowerCamelCase : int = self._feature_file(UpperCamelCase__ ) logger.info("Loading features from cached file %s" , UpperCamelCase__ ) lowerCamelCase : str = torch.load(UpperCamelCase__ ) lowerCamelCase : List[str] = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) lowerCamelCase : str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) lowerCamelCase : List[str] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) if self.hparams.glue_output_mode == "classification": lowerCamelCase : Any = torch.tensor([f.label for f in features] , dtype=torch.long ) elif self.hparams.glue_output_mode == "regression": lowerCamelCase : Union[str, Any] = torch.tensor([f.label for f in features] , dtype=torch.float ) return DataLoader( TensorDataset(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , batch_size=UpperCamelCase__ , shuffle=UpperCamelCase__ , ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]: lowerCamelCase : Dict = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: lowerCamelCase : Tuple = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None lowerCamelCase : Dict = self(**UpperCamelCase__ ) lowerCamelCase , lowerCamelCase : Any = outputs[:2] lowerCamelCase : Union[str, Any] = logits.detach().cpu().numpy() lowerCamelCase : Optional[Any] = inputs["labels"].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def _lowercase ( self , UpperCamelCase__ ) -> tuple: lowerCamelCase : Union[str, Any] = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item() lowerCamelCase : Optional[int] = np.concatenate([x["pred"] for x in outputs] , axis=0 ) if self.hparams.glue_output_mode == "classification": lowerCamelCase : Union[str, Any] = np.argmax(UpperCamelCase__ , axis=1 ) elif self.hparams.glue_output_mode == "regression": lowerCamelCase : str = np.squeeze(UpperCamelCase__ ) lowerCamelCase : List[Any] = np.concatenate([x["target"] for x in outputs] , axis=0 ) lowerCamelCase : List[str] = [[] for _ in range(out_label_ids.shape[0] )] lowerCamelCase : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )] lowerCamelCase : Dict = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , UpperCamelCase__ , UpperCamelCase__ )} lowerCamelCase : List[str] = dict(results.items() ) lowerCamelCase : Optional[int] = results return ret, preds_list, out_label_list def _lowercase ( self , UpperCamelCase__ ) -> dict: lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = self._eval_end(UpperCamelCase__ ) lowerCamelCase : str = ret["log"] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def _lowercase ( self , UpperCamelCase__ ) -> dict: lowerCamelCase , lowerCamelCase , lowerCamelCase : str = self._eval_end(UpperCamelCase__ ) lowerCamelCase : str = ret["log"] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def _lowercase ( UpperCamelCase__ , UpperCamelCase__ ) -> int: BaseTransformer.add_model_specific_args(UpperCamelCase__ , UpperCamelCase__ ) parser.add_argument( "--max_seq_length" , default=128 , type=UpperCamelCase__ , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) , ) parser.add_argument( "--task" , default="" , type=UpperCamelCase__ , required=UpperCamelCase__ , help="The GLUE task to run" , ) parser.add_argument( "--gpus" , default=0 , type=UpperCamelCase__ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , ) parser.add_argument( "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" ) return parser def A ( ) -> int: lowerCamelCase : int = argparse.ArgumentParser() add_generic_args(_SCREAMING_SNAKE_CASE ,os.getcwd() ) lowerCamelCase : str = GLUETransformer.add_model_specific_args(_SCREAMING_SNAKE_CASE ,os.getcwd() ) lowerCamelCase : str = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: lowerCamelCase : int = os.path.join( "./results" ,f'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' ,) os.makedirs(args.output_dir ) lowerCamelCase : int = GLUETransformer(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Dict = generic_train(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # Optionally, predict on dev set and write to output_dir if args.do_predict: lowerCamelCase : Optional[int] = sorted(glob.glob(os.path.join(args.output_dir ,"checkpoint-epoch=*.ckpt" ) ,recursive=_SCREAMING_SNAKE_CASE ) ) lowerCamelCase : Tuple = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
48
1
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : List[Any] = { 'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json', 'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json', 'microsoft/deberta-v2-xlarge-mnli': ( 'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json' ), 'microsoft/deberta-v2-xxlarge-mnli': ( 'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json' ), } class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Any = """deberta-v2""" def __init__( self , UpperCamelCase__=12_8100 , UpperCamelCase__=1536 , UpperCamelCase__=24 , UpperCamelCase__=24 , UpperCamelCase__=6144 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=0 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-7 , UpperCamelCase__=False , UpperCamelCase__=-1 , UpperCamelCase__=0 , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=0 , UpperCamelCase__="gelu" , **UpperCamelCase__ , ) -> Optional[int]: super().__init__(**UpperCamelCase__ ) lowerCamelCase : List[str] = hidden_size lowerCamelCase : Optional[int] = num_hidden_layers lowerCamelCase : Optional[Any] = num_attention_heads lowerCamelCase : Union[str, Any] = intermediate_size lowerCamelCase : Any = hidden_act lowerCamelCase : Dict = hidden_dropout_prob lowerCamelCase : Dict = attention_probs_dropout_prob lowerCamelCase : Tuple = max_position_embeddings lowerCamelCase : Optional[Any] = type_vocab_size lowerCamelCase : List[str] = initializer_range lowerCamelCase : Tuple = relative_attention lowerCamelCase : List[str] = max_relative_positions lowerCamelCase : Optional[int] = pad_token_id lowerCamelCase : int = position_biased_input # Backwards compatibility if type(UpperCamelCase__ ) == str: lowerCamelCase : Any = [x.strip() for x in pos_att_type.lower().split("|" )] lowerCamelCase : List[Any] = pos_att_type lowerCamelCase : Dict = vocab_size lowerCamelCase : Dict = layer_norm_eps lowerCamelCase : Any = kwargs.get("pooler_hidden_size" , UpperCamelCase__ ) lowerCamelCase : Dict = pooler_dropout lowerCamelCase : int = pooler_hidden_act class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' @property def _lowercase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": lowerCamelCase : str = {0: "batch", 1: "choice", 2: "sequence"} else: lowerCamelCase : int = {0: "batch", 1: "sequence"} if self._config.type_vocab_size > 0: return OrderedDict( [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)] ) else: return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)] ) @property def _lowercase ( self ) -> int: return 12 def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = -1 , UpperCamelCase__ = -1 , UpperCamelCase__ = -1 , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = 3 , UpperCamelCase__ = 40 , UpperCamelCase__ = 40 , UpperCamelCase__ = None , ) -> Mapping[str, Any]: lowerCamelCase : Union[str, Any] = super().generate_dummy_inputs(preprocessor=UpperCamelCase__ , framework=UpperCamelCase__ ) if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs: del dummy_inputs["token_type_ids"] return dummy_inputs
48
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Any: # "extended trapezoidal rule" # int(f) = dx/2 * (f1 + 2f2 + ... + fn) lowerCamelCase : str = (boundary[1] - boundary[0]) / steps lowerCamelCase : List[str] = boundary[0] lowerCamelCase : Union[str, Any] = boundary[1] lowerCamelCase : int = make_points(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) lowerCamelCase : List[str] = 0.0 y += (h / 2.0) * f(_SCREAMING_SNAKE_CASE ) for i in x_i: # print(i) y += h * f(_SCREAMING_SNAKE_CASE ) y += (h / 2.0) * f(_SCREAMING_SNAKE_CASE ) return y def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> int: lowerCamelCase : int = a + h while x < (b - h): yield x lowerCamelCase : List[str] = x + h def A ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]: # enter your function here lowerCamelCase : str = (x - 0) * (x - 0) return y def A ( ) -> int: lowerCamelCase : int = 0.0 # Lower bound of integration lowerCamelCase : int = 1.0 # Upper bound of integration lowerCamelCase : Dict = 10.0 # define number of steps or resolution lowerCamelCase : int = [a, b] # define boundary of integration lowerCamelCase : str = method_a(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) print(f'''y = {y}''' ) if __name__ == "__main__": main()
48
1
from diffusers.utils.testing_utils import require_onnxruntime @require_onnxruntime class UpperCamelCase__ : '''simple docstring''' pass
48
def A ( _SCREAMING_SNAKE_CASE = 100_0000 ) -> int: lowerCamelCase : Tuple = 1 lowerCamelCase : int = 1 lowerCamelCase : Optional[Any] = {1: 1} for inputa in range(2 ,_SCREAMING_SNAKE_CASE ): lowerCamelCase : Union[str, Any] = 0 lowerCamelCase : List[str] = inputa while True: if number in counters: counter += counters[number] break if number % 2 == 0: number //= 2 counter += 1 else: lowerCamelCase : str = (3 * number) + 1 counter += 1 if inputa not in counters: lowerCamelCase : str = counter if counter > pre_counter: lowerCamelCase : str = inputa lowerCamelCase : Any = counter return largest_number if __name__ == "__main__": print(solution(int(input().strip())))
48
1
from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class UpperCamelCase__ : '''simple docstring''' def __init__( self , UpperCamelCase__ , UpperCamelCase__=2 , UpperCamelCase__=3 , UpperCamelCase__=4 , UpperCamelCase__=2 , UpperCamelCase__=7 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=99 , UpperCamelCase__=36 , UpperCamelCase__=2 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=16 , UpperCamelCase__=2 , UpperCamelCase__=0.02 , UpperCamelCase__=6 , UpperCamelCase__=6 , UpperCamelCase__=3 , UpperCamelCase__=4 , UpperCamelCase__=None , UpperCamelCase__=1000 , ) -> str: lowerCamelCase : int = parent lowerCamelCase : Optional[Any] = batch_size lowerCamelCase : Union[str, Any] = num_channels lowerCamelCase : Dict = image_size lowerCamelCase : Union[str, Any] = patch_size lowerCamelCase : Dict = is_training lowerCamelCase : List[str] = use_input_mask lowerCamelCase : int = use_token_type_ids lowerCamelCase : Dict = use_labels lowerCamelCase : Union[str, Any] = vocab_size lowerCamelCase : List[Any] = hidden_size lowerCamelCase : Optional[int] = num_hidden_layers lowerCamelCase : List[str] = num_attention_heads lowerCamelCase : int = intermediate_size lowerCamelCase : List[str] = hidden_act lowerCamelCase : Tuple = hidden_dropout_prob lowerCamelCase : Optional[int] = attention_probs_dropout_prob lowerCamelCase : List[Any] = max_position_embeddings lowerCamelCase : Dict = type_vocab_size lowerCamelCase : str = type_sequence_label_size lowerCamelCase : Tuple = initializer_range lowerCamelCase : Dict = coordinate_size lowerCamelCase : Tuple = shape_size lowerCamelCase : List[Any] = num_labels lowerCamelCase : Tuple = num_choices lowerCamelCase : int = scope lowerCamelCase : Optional[Any] = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) lowerCamelCase : Dict = text_seq_length lowerCamelCase : List[Any] = (image_size // patch_size) ** 2 + 1 lowerCamelCase : Union[str, Any] = self.text_seq_length + self.image_seq_length def _lowercase ( self ) -> int: lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) lowerCamelCase : Dict = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) lowerCamelCase : Optional[int] = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: lowerCamelCase : Dict = bbox[i, j, 3] lowerCamelCase : Optional[int] = bbox[i, j, 1] lowerCamelCase : Any = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: lowerCamelCase : List[str] = bbox[i, j, 2] lowerCamelCase : Optional[int] = bbox[i, j, 0] lowerCamelCase : Tuple = tmp_coordinate lowerCamelCase : Union[str, Any] = tf.constant(UpperCamelCase__ ) lowerCamelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase : Optional[int] = None if self.use_input_mask: lowerCamelCase : Optional[int] = random_attention_mask([self.batch_size, self.text_seq_length] ) lowerCamelCase : Optional[int] = None if self.use_token_type_ids: lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) lowerCamelCase : str = None lowerCamelCase : Optional[int] = None if self.use_labels: lowerCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) lowerCamelCase : Any = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]: lowerCamelCase : Optional[int] = TFLayoutLMvaModel(config=UpperCamelCase__ ) # text + image lowerCamelCase : Any = model(UpperCamelCase__ , pixel_values=UpperCamelCase__ , training=UpperCamelCase__ ) lowerCamelCase : Optional[int] = model( UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , training=UpperCamelCase__ , ) lowerCamelCase : Any = model(UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , training=UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only lowerCamelCase : Dict = model(UpperCamelCase__ , training=UpperCamelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only lowerCamelCase : Union[str, Any] = model({"pixel_values": pixel_values} , training=UpperCamelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]: lowerCamelCase : str = self.num_labels lowerCamelCase : Optional[Any] = TFLayoutLMvaForSequenceClassification(config=UpperCamelCase__ ) lowerCamelCase : str = model( UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , training=UpperCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]: lowerCamelCase : int = self.num_labels lowerCamelCase : str = TFLayoutLMvaForTokenClassification(config=UpperCamelCase__ ) lowerCamelCase : Tuple = model( UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , training=UpperCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]: lowerCamelCase : Union[str, Any] = 2 lowerCamelCase : Dict = TFLayoutLMvaForQuestionAnswering(config=UpperCamelCase__ ) lowerCamelCase : Tuple = model( UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , training=UpperCamelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowercase ( self ) -> str: lowerCamelCase : List[Any] = self.prepare_config_and_inputs() ((lowerCamelCase) , (lowerCamelCase) , (lowerCamelCase) , (lowerCamelCase) , (lowerCamelCase) , (lowerCamelCase) , (lowerCamelCase) , (lowerCamelCase)) : List[str] = config_and_inputs lowerCamelCase : Dict = { "input_ids": input_ids, "bbox": bbox, "pixel_values": pixel_values, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_tf class UpperCamelCase__ (lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase_ : Union[str, Any] = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) lowerCamelCase_ : Dict = ( {"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel} if is_tf_available() else {} ) lowerCamelCase_ : int = False lowerCamelCase_ : int = False lowerCamelCase_ : str = False def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]: return True def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ) -> dict: lowerCamelCase : Optional[Any] = copy.deepcopy(UpperCamelCase__ ) if model_class in get_values(UpperCamelCase__ ): lowerCamelCase : List[str] = { k: tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(UpperCamelCase__ , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(UpperCamelCase__ ): lowerCamelCase : Dict = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(UpperCamelCase__ ): lowerCamelCase : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) lowerCamelCase : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(UpperCamelCase__ ): lowerCamelCase : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(UpperCamelCase__ ): lowerCamelCase : Union[str, Any] = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def _lowercase ( self ) -> Optional[Any]: lowerCamelCase : Dict = TFLayoutLMvaModelTester(self ) lowerCamelCase : List[Any] = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 ) def _lowercase ( self ) -> Optional[int]: self.config_tester.run_common_tests() def _lowercase ( self ) -> Optional[int]: lowerCamelCase , lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : List[Any] = model_class(UpperCamelCase__ ) if getattr(UpperCamelCase__ , "hf_compute_loss" , UpperCamelCase__ ): # The number of elements in the loss should be the same as the number of elements in the label lowerCamelCase : Optional[int] = self._prepare_for_class(inputs_dict.copy() , UpperCamelCase__ , return_labels=UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=UpperCamelCase__ )[0] ] lowerCamelCase : Optional[Any] = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs lowerCamelCase : int = self._prepare_for_class(inputs_dict.copy() , UpperCamelCase__ , return_labels=UpperCamelCase__ ) lowerCamelCase : Dict = prepared_for_class.pop("input_ids" ) lowerCamelCase : Optional[int] = model(UpperCamelCase__ , **UpperCamelCase__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions lowerCamelCase : List[Any] = self._prepare_for_class(inputs_dict.copy() , UpperCamelCase__ , return_labels=UpperCamelCase__ ) lowerCamelCase : Dict = prepared_for_class.pop("input_ids" ) if "labels" in prepared_for_class: lowerCamelCase : List[str] = prepared_for_class["labels"].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: lowerCamelCase : Optional[int] = -100 lowerCamelCase : Any = tf.convert_to_tensor(UpperCamelCase__ ) lowerCamelCase : Dict = model(UpperCamelCase__ , **UpperCamelCase__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict lowerCamelCase : str = self._prepare_for_class(inputs_dict.copy() , UpperCamelCase__ , return_labels=UpperCamelCase__ ) lowerCamelCase : Optional[int] = model(UpperCamelCase__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple lowerCamelCase : Optional[Any] = self._prepare_for_class(inputs_dict.copy() , UpperCamelCase__ , return_labels=UpperCamelCase__ ) # Get keys that were added with the _prepare_for_class function lowerCamelCase : Any = prepared_for_class.keys() - inputs_dict.keys() lowerCamelCase : Any = inspect.signature(model.call ).parameters lowerCamelCase : List[str] = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple lowerCamelCase : Any = {0: "input_ids"} for label_key in label_keys: lowerCamelCase : Optional[Any] = signature_names.index(UpperCamelCase__ ) lowerCamelCase : int = label_key lowerCamelCase : str = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple lowerCamelCase : List[str] = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: lowerCamelCase : Optional[Any] = prepared_for_class[value] lowerCamelCase : Any = tuple(UpperCamelCase__ ) # Send to model lowerCamelCase : List[Any] = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def _lowercase ( self ) -> Optional[Any]: ( ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ) : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self ) -> List[str]: ( ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ) : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCamelCase : List[str] = type self.model_tester.create_and_check_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self ) -> Optional[int]: ( ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ) : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self ) -> Any: ( ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ) : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self ) -> Union[str, Any]: ( ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ) : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) @slow def _lowercase ( self ) -> Optional[int]: for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : Dict = TFLayoutLMvaModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def A ( ) -> int: lowerCamelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @cached_property def _lowercase ( self ) -> Union[str, Any]: return LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase__ ) if is_vision_available() else None @slow def _lowercase ( self ) -> Union[str, Any]: lowerCamelCase : Union[str, Any] = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ) lowerCamelCase : Any = self.default_image_processor lowerCamelCase : Optional[Any] = prepare_img() lowerCamelCase : Dict = image_processor(images=UpperCamelCase__ , return_tensors="tf" ).pixel_values lowerCamelCase : Union[str, Any] = tf.constant([[1, 2]] ) lowerCamelCase : Dict = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass lowerCamelCase : List[Any] = model(input_ids=UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , training=UpperCamelCase__ ) # verify the logits lowerCamelCase : Dict = (1, 199, 768) self.assertEqual(outputs.last_hidden_state.shape , UpperCamelCase__ ) lowerCamelCase : Dict = tf.constant( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
48
import argparse import os import re SCREAMING_SNAKE_CASE__ : List[Any] = 'src/transformers/models/auto' # re pattern that matches mapping introductions: # SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict SCREAMING_SNAKE_CASE__ : Optional[int] = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict') # re pattern that matches identifiers in mappings SCREAMING_SNAKE_CASE__ : Tuple = re.compile(r'\s*\(\s*"(\S[^"]+)"') def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = False ) -> int: with open(_SCREAMING_SNAKE_CASE ,"r" ,encoding="utf-8" ) as f: lowerCamelCase : List[Any] = f.read() lowerCamelCase : str = content.split("\n" ) lowerCamelCase : int = [] lowerCamelCase : List[Any] = 0 while line_idx < len(_SCREAMING_SNAKE_CASE ): if _re_intro_mapping.search(lines[line_idx] ) is not None: lowerCamelCase : Optional[int] = len(re.search(r"^(\s*)\S" ,lines[line_idx] ).groups()[0] ) + 8 # Start of a new mapping! while not lines[line_idx].startswith(" " * indent + "(" ): new_lines.append(lines[line_idx] ) line_idx += 1 lowerCamelCase : Optional[int] = [] while lines[line_idx].strip() != "]": # Blocks either fit in one line or not if lines[line_idx].strip() == "(": lowerCamelCase : List[str] = line_idx while not lines[line_idx].startswith(" " * indent + ")" ): line_idx += 1 blocks.append("\n".join(lines[start_idx : line_idx + 1] ) ) else: blocks.append(lines[line_idx] ) line_idx += 1 # Sort blocks by their identifiers lowerCamelCase : Union[str, Any] = sorted(_SCREAMING_SNAKE_CASE ,key=lambda _SCREAMING_SNAKE_CASE : _re_identifier.search(_SCREAMING_SNAKE_CASE ).groups()[0] ) new_lines += blocks else: new_lines.append(lines[line_idx] ) line_idx += 1 if overwrite: with open(_SCREAMING_SNAKE_CASE ,"w" ,encoding="utf-8" ) as f: f.write("\n".join(_SCREAMING_SNAKE_CASE ) ) elif "\n".join(_SCREAMING_SNAKE_CASE ) != content: return True def A ( _SCREAMING_SNAKE_CASE = False ) -> List[str]: lowerCamelCase : str = [os.path.join(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) for f in os.listdir(_SCREAMING_SNAKE_CASE ) if f.endswith(".py" )] lowerCamelCase : Union[str, Any] = [sort_auto_mapping(_SCREAMING_SNAKE_CASE ,overwrite=_SCREAMING_SNAKE_CASE ) for fname in fnames] if not overwrite and any(_SCREAMING_SNAKE_CASE ): lowerCamelCase : str = [f for f, d in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) if d] raise ValueError( f'''The following files have auto mappings that need sorting: {", ".join(_SCREAMING_SNAKE_CASE )}. Run `make style` to fix''' " this." ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : List[str] = argparse.ArgumentParser() parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.') SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args() sort_all_auto_mappings(not args.check_only)
48
1
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=3 , UpperCamelCase__=224 , UpperCamelCase__=30 , UpperCamelCase__=400 , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=True , UpperCamelCase__=[0.5, 0.5, 0.5] , UpperCamelCase__=[0.5, 0.5, 0.5] , ) -> str: lowerCamelCase : int = size if size is not None else {"height": 18, "width": 18} lowerCamelCase : int = parent lowerCamelCase : str = batch_size lowerCamelCase : List[Any] = num_channels lowerCamelCase : Any = image_size lowerCamelCase : Optional[int] = min_resolution lowerCamelCase : Dict = max_resolution lowerCamelCase : Tuple = do_resize lowerCamelCase : Tuple = size lowerCamelCase : Tuple = do_normalize lowerCamelCase : Any = image_mean lowerCamelCase : int = image_std def _lowercase ( self ) -> List[Any]: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class UpperCamelCase__ (lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase_ : Dict = ViTImageProcessor if is_vision_available() else None def _lowercase ( self ) -> List[Any]: lowerCamelCase : List[Any] = EfficientFormerImageProcessorTester(self ) @property def _lowercase ( self ) -> Tuple: return self.image_proc_tester.prepare_image_processor_dict() def _lowercase ( self ) -> Optional[int]: lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase__ , "image_mean" ) ) self.assertTrue(hasattr(UpperCamelCase__ , "image_std" ) ) self.assertTrue(hasattr(UpperCamelCase__ , "do_normalize" ) ) self.assertTrue(hasattr(UpperCamelCase__ , "do_resize" ) ) self.assertTrue(hasattr(UpperCamelCase__ , "size" ) ) def _lowercase ( self ) -> List[Any]: pass def _lowercase ( self ) -> List[str]: # Initialize image_processor lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase : Tuple = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , Image.Image ) # Test not batched input lowerCamelCase : Dict = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched lowerCamelCase : Union[str, Any] = image_processor(UpperCamelCase__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) def _lowercase ( self ) -> str: # Initialize image_processor lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase : Optional[int] = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , np.ndarray ) # Test not batched input lowerCamelCase : List[Any] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched lowerCamelCase : Tuple = image_processor(UpperCamelCase__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) def _lowercase ( self ) -> int: # Initialize image_processor lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase : int = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , torch.Tensor ) # Test not batched input lowerCamelCase : Union[str, Any] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched lowerCamelCase : Tuple = image_processor(UpperCamelCase__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , )
48
def A ( _SCREAMING_SNAKE_CASE ) -> list: if n_term == "": return [] lowerCamelCase : list = [] for temp in range(int(_SCREAMING_SNAKE_CASE ) ): series.append(f'''1/{temp + 1}''' if series else "1" ) return series if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Dict = input('Enter the last number (nth term) of the Harmonic Series') print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n') print(harmonic_series(nth_term))
48
1
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/' def A ( _SCREAMING_SNAKE_CASE ) -> bytes: # Make sure the supplied data is a bytes-like object if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): lowerCamelCase : int = f'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Optional[int] = "".join(bin(_SCREAMING_SNAKE_CASE )[2:].zfill(8 ) for byte in data ) lowerCamelCase : List[str] = len(_SCREAMING_SNAKE_CASE ) % 6 != 0 if padding_needed: # The padding that will be added later lowerCamelCase : Optional[int] = B"=" * ((6 - len(_SCREAMING_SNAKE_CASE ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(_SCREAMING_SNAKE_CASE ) % 6) else: lowerCamelCase : List[Any] = B"" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] ,2 )] for index in range(0 ,len(_SCREAMING_SNAKE_CASE ) ,6 ) ).encode() + padding ) def A ( _SCREAMING_SNAKE_CASE ) -> bytes: # Make sure encoded_data is either a string or a bytes-like object if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) and not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): lowerCamelCase : List[Any] = ( "argument should be a bytes-like object or ASCII string, " f'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(_SCREAMING_SNAKE_CASE ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): try: lowerCamelCase : Optional[Any] = encoded_data.decode("utf-8" ) except UnicodeDecodeError: raise ValueError("base64 encoded data should only contain ASCII characters" ) lowerCamelCase : Optional[Any] = encoded_data.count("=" ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(_SCREAMING_SNAKE_CASE ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one lowerCamelCase : Optional[Any] = encoded_data[:-padding] lowerCamelCase : Tuple = "".join( bin(B64_CHARSET.index(_SCREAMING_SNAKE_CASE ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: lowerCamelCase : Dict = "".join( bin(B64_CHARSET.index(_SCREAMING_SNAKE_CASE ) )[2:].zfill(6 ) for char in encoded_data ) lowerCamelCase : int = [ int(binary_stream[index : index + 8] ,2 ) for index in range(0 ,len(_SCREAMING_SNAKE_CASE ) ,8 ) ] return bytes(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod()
48
from __future__ import annotations import requests def A ( _SCREAMING_SNAKE_CASE ) -> dict: lowerCamelCase : Tuple = f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty''' return requests.get(_SCREAMING_SNAKE_CASE ).json() def A ( _SCREAMING_SNAKE_CASE = 10 ) -> list[dict]: lowerCamelCase : str = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty" lowerCamelCase : Any = requests.get(_SCREAMING_SNAKE_CASE ).json()[:max_stories] return [get_hackernews_story(_SCREAMING_SNAKE_CASE ) for story_id in story_ids] def A ( _SCREAMING_SNAKE_CASE = 10 ) -> str: lowerCamelCase : str = hackernews_top_stories(_SCREAMING_SNAKE_CASE ) return "\n".join("* [{title}]({url})".format(**_SCREAMING_SNAKE_CASE ) for story in stories ) if __name__ == "__main__": print(hackernews_top_stories_as_markdown())
48
1
from __future__ import annotations import sys from collections import deque from typing import Generic, TypeVar SCREAMING_SNAKE_CASE__ : Any = TypeVar('T') class UpperCamelCase__ (Generic[T] ): '''simple docstring''' lowerCamelCase_ : deque[T] # Cache store of keys lowerCamelCase_ : set[T] # References of the keys in cache lowerCamelCase_ : int = 1_0 # Maximum capacity of cache def __init__( self , UpperCamelCase__ ) -> None: lowerCamelCase : Dict = deque() lowerCamelCase : int = set() if not n: lowerCamelCase : Optional[int] = sys.maxsize elif n < 0: raise ValueError("n should be an integer greater than 0." ) else: lowerCamelCase : Union[str, Any] = n def _lowercase ( self , UpperCamelCase__ ) -> None: if x not in self.key_reference: if len(self.dq_store ) == LRUCache._MAX_CAPACITY: lowerCamelCase : Optional[Any] = self.dq_store.pop() self.key_reference.remove(UpperCamelCase__ ) else: self.dq_store.remove(UpperCamelCase__ ) self.dq_store.appendleft(UpperCamelCase__ ) self.key_reference.add(UpperCamelCase__ ) def _lowercase ( self ) -> None: for k in self.dq_store: print(UpperCamelCase__ ) def __repr__( self ) -> str: return F'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}''' if __name__ == "__main__": import doctest doctest.testmod() SCREAMING_SNAKE_CASE__ : LRUCache[str | int] = LRUCache(4) lru_cache.refer('A') lru_cache.refer(2) lru_cache.refer(3) lru_cache.refer('A') lru_cache.refer(4) lru_cache.refer(5) lru_cache.display() print(lru_cache) assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
48
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Dict = { 'salesforce/blip2-opt-2.7b': 'https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json', } class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Union[str, Any] = """blip_2_vision_model""" def __init__( self , UpperCamelCase__=1408 , UpperCamelCase__=6144 , UpperCamelCase__=39 , UpperCamelCase__=16 , UpperCamelCase__=224 , UpperCamelCase__=14 , UpperCamelCase__="gelu" , UpperCamelCase__=0.00001 , UpperCamelCase__=0.0 , UpperCamelCase__=1e-10 , UpperCamelCase__=True , **UpperCamelCase__ , ) -> Optional[Any]: super().__init__(**UpperCamelCase__ ) lowerCamelCase : Dict = hidden_size lowerCamelCase : Union[str, Any] = intermediate_size lowerCamelCase : List[str] = num_hidden_layers lowerCamelCase : List[str] = num_attention_heads lowerCamelCase : Dict = patch_size lowerCamelCase : Tuple = image_size lowerCamelCase : Dict = initializer_range lowerCamelCase : Union[str, Any] = attention_dropout lowerCamelCase : Dict = layer_norm_eps lowerCamelCase : Optional[Any] = hidden_act lowerCamelCase : str = qkv_bias @classmethod def _lowercase ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> "PretrainedConfig": cls._set_token_in_kwargs(UpperCamelCase__ ) lowerCamelCase , lowerCamelCase : List[str] = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ ) # get the vision config dict if we are loading from Blip2Config if config_dict.get("model_type" ) == "blip-2": lowerCamelCase : Optional[int] = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ ) class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Dict = """blip_2_qformer""" def __init__( self , UpperCamelCase__=3_0522 , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-12 , UpperCamelCase__=0 , UpperCamelCase__="absolute" , UpperCamelCase__=2 , UpperCamelCase__=1408 , **UpperCamelCase__ , ) -> int: super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ ) lowerCamelCase : Optional[int] = vocab_size lowerCamelCase : int = hidden_size lowerCamelCase : Dict = num_hidden_layers lowerCamelCase : Union[str, Any] = num_attention_heads lowerCamelCase : int = hidden_act lowerCamelCase : Optional[Any] = intermediate_size lowerCamelCase : Dict = hidden_dropout_prob lowerCamelCase : Dict = attention_probs_dropout_prob lowerCamelCase : Dict = max_position_embeddings lowerCamelCase : List[str] = initializer_range lowerCamelCase : List[str] = layer_norm_eps lowerCamelCase : int = position_embedding_type lowerCamelCase : Tuple = cross_attention_frequency lowerCamelCase : Optional[int] = encoder_hidden_size @classmethod def _lowercase ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> "PretrainedConfig": cls._set_token_in_kwargs(UpperCamelCase__ ) lowerCamelCase , lowerCamelCase : str = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ ) # get the qformer config dict if we are loading from Blip2Config if config_dict.get("model_type" ) == "blip-2": lowerCamelCase : int = config_dict["qformer_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ ) class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : List[str] = """blip-2""" lowerCamelCase_ : int = True def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=32 , **UpperCamelCase__ ) -> str: super().__init__(**UpperCamelCase__ ) if vision_config is None: lowerCamelCase : List[Any] = {} logger.info("vision_config is None. initializing the Blip2VisionConfig with default values." ) if qformer_config is None: lowerCamelCase : List[Any] = {} logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values." ) if text_config is None: lowerCamelCase : Any = {} logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." ) lowerCamelCase : Optional[int] = BlipaVisionConfig(**UpperCamelCase__ ) lowerCamelCase : str = BlipaQFormerConfig(**UpperCamelCase__ ) lowerCamelCase : List[str] = text_config["model_type"] if "model_type" in text_config else "opt" lowerCamelCase : str = CONFIG_MAPPING[text_model_type](**UpperCamelCase__ ) lowerCamelCase : Optional[Any] = self.text_config.tie_word_embeddings lowerCamelCase : int = self.text_config.is_encoder_decoder lowerCamelCase : Optional[Any] = num_query_tokens lowerCamelCase : int = self.vision_config.hidden_size lowerCamelCase : Tuple = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES lowerCamelCase : Dict = 1.0 lowerCamelCase : List[Any] = 0.02 @classmethod def _lowercase ( cls , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ , ) -> str: return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **UpperCamelCase__ , ) def _lowercase ( self ) -> Optional[Any]: lowerCamelCase : Tuple = copy.deepcopy(self.__dict__ ) lowerCamelCase : Tuple = self.vision_config.to_dict() lowerCamelCase : int = self.qformer_config.to_dict() lowerCamelCase : Optional[Any] = self.text_config.to_dict() lowerCamelCase : int = self.__class__.model_type return output
48
1
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bert import BertTokenizer SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Any = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} SCREAMING_SNAKE_CASE__ : Optional[Any] = { 'vocab_file': { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt', 'bert-base-multilingual-uncased': ( 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt' ), 'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt', 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt' ), 'bert-base-cased-finetuned-mrpc': ( 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt' ), 'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt', 'bert-base-german-dbmdz-uncased': ( 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt' ), 'wietsedv/bert-base-dutch-cased': ( 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json', 'bert-base-multilingual-uncased': ( 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json' ), 'bert-base-multilingual-cased': ( 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json' ), 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json' ), 'bert-base-cased-finetuned-mrpc': ( 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json' ), 'bert-base-german-dbmdz-cased': ( 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json' ), 'bert-base-german-dbmdz-uncased': ( 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json' ), 'wietsedv/bert-base-dutch-cased': ( 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json' ), }, } SCREAMING_SNAKE_CASE__ : Any = { 'bert-base-uncased': 512, 'bert-large-uncased': 512, 'bert-base-cased': 512, 'bert-large-cased': 512, 'bert-base-multilingual-uncased': 512, 'bert-base-multilingual-cased': 512, 'bert-base-chinese': 512, 'bert-base-german-cased': 512, 'bert-large-uncased-whole-word-masking': 512, 'bert-large-cased-whole-word-masking': 512, 'bert-large-uncased-whole-word-masking-finetuned-squad': 512, 'bert-large-cased-whole-word-masking-finetuned-squad': 512, 'bert-base-cased-finetuned-mrpc': 512, 'bert-base-german-dbmdz-cased': 512, 'bert-base-german-dbmdz-uncased': 512, 'TurkuNLP/bert-base-finnish-cased-v1': 512, 'TurkuNLP/bert-base-finnish-uncased-v1': 512, 'wietsedv/bert-base-dutch-cased': 512, } SCREAMING_SNAKE_CASE__ : int = { 'bert-base-uncased': {'do_lower_case': True}, 'bert-large-uncased': {'do_lower_case': True}, 'bert-base-cased': {'do_lower_case': False}, 'bert-large-cased': {'do_lower_case': False}, 'bert-base-multilingual-uncased': {'do_lower_case': True}, 'bert-base-multilingual-cased': {'do_lower_case': False}, 'bert-base-chinese': {'do_lower_case': False}, 'bert-base-german-cased': {'do_lower_case': False}, 'bert-large-uncased-whole-word-masking': {'do_lower_case': True}, 'bert-large-cased-whole-word-masking': {'do_lower_case': False}, 'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True}, 'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False}, 'bert-base-cased-finetuned-mrpc': {'do_lower_case': False}, 'bert-base-german-dbmdz-cased': {'do_lower_case': False}, 'bert-base-german-dbmdz-uncased': {'do_lower_case': True}, 'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False}, 'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True}, 'wietsedv/bert-base-dutch-cased': {'do_lower_case': False}, } class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : str = VOCAB_FILES_NAMES lowerCamelCase_ : Tuple = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase_ : List[str] = PRETRAINED_INIT_CONFIGURATION lowerCamelCase_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase_ : List[Any] = BertTokenizer def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=True , UpperCamelCase__="[UNK]" , UpperCamelCase__="[SEP]" , UpperCamelCase__="[PAD]" , UpperCamelCase__="[CLS]" , UpperCamelCase__="[MASK]" , UpperCamelCase__=True , UpperCamelCase__=None , **UpperCamelCase__ , ) -> List[Any]: super().__init__( UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , do_lower_case=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , tokenize_chinese_chars=UpperCamelCase__ , strip_accents=UpperCamelCase__ , **UpperCamelCase__ , ) lowerCamelCase : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , UpperCamelCase__ ) != do_lower_case or normalizer_state.get("strip_accents" , UpperCamelCase__ ) != strip_accents or normalizer_state.get("handle_chinese_chars" , UpperCamelCase__ ) != tokenize_chinese_chars ): lowerCamelCase : Dict = getattr(UpperCamelCase__ , normalizer_state.pop("type" ) ) lowerCamelCase : str = do_lower_case lowerCamelCase : str = strip_accents lowerCamelCase : Union[str, Any] = tokenize_chinese_chars lowerCamelCase : Dict = normalizer_class(**UpperCamelCase__ ) lowerCamelCase : Optional[Any] = do_lower_case def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__=None ) -> Dict: lowerCamelCase : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]: lowerCamelCase : Tuple = [self.sep_token_id] lowerCamelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]: lowerCamelCase : Dict = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ ) return tuple(UpperCamelCase__ )
48
import random from .binary_exp_mod import bin_exp_mod def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=1000 ) -> List[str]: if n < 2: return False if n % 2 == 0: return n == 2 # this means n is odd lowerCamelCase : List[Any] = n - 1 lowerCamelCase : Dict = 0 while d % 2 == 0: d /= 2 exp += 1 # n - 1=d*(2**exp) lowerCamelCase : Optional[Any] = 0 while count < prec: lowerCamelCase : str = random.randint(2 ,n - 1 ) lowerCamelCase : Dict = bin_exp_mod(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) if b != 1: lowerCamelCase : str = True for _ in range(_SCREAMING_SNAKE_CASE ): if b == n - 1: lowerCamelCase : Tuple = False break lowerCamelCase : int = b * b b %= n if flag: return False count += 1 return True if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Optional[int] = abs(int(input('Enter bound : ').strip())) print('Here\'s the list of primes:') print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
48
1
from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : List[Any] = ["""image_processor""", """tokenizer"""] lowerCamelCase_ : Dict = """BlipImageProcessor""" lowerCamelCase_ : Any = """AutoTokenizer""" def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]: lowerCamelCase : Dict = False super().__init__(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase : List[Any] = self.image_processor def __call__( self , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = True , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = 0 , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = True , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> BatchEncoding: if images is None and text is None: raise ValueError("You have to specify either images or text." ) # Get only text if images is None: lowerCamelCase : int = self.tokenizer lowerCamelCase : str = self.tokenizer( text=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , stride=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , return_special_tokens_mask=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_length=UpperCamelCase__ , verbose=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , ) return text_encoding # add pixel_values lowerCamelCase : Dict = self.image_processor(UpperCamelCase__ , return_tensors=UpperCamelCase__ ) if text is not None: lowerCamelCase : Union[str, Any] = self.tokenizer( text=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , stride=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , return_special_tokens_mask=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_length=UpperCamelCase__ , verbose=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , ) else: lowerCamelCase : List[Any] = None if text_encoding is not None: encoding_image_processor.update(UpperCamelCase__ ) return encoding_image_processor def _lowercase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[Any]: return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]: return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def _lowercase ( self ) -> Optional[int]: lowerCamelCase : str = self.tokenizer.model_input_names lowerCamelCase : Optional[int] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
48
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Tuple = {'vocab_file': 'spiece.model'} SCREAMING_SNAKE_CASE__ : int = { 'vocab_file': { 'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model', 'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model', } } SCREAMING_SNAKE_CASE__ : str = { 'xlnet-base-cased': None, 'xlnet-large-cased': None, } # Segments (not really needed) SCREAMING_SNAKE_CASE__ : Dict = 0 SCREAMING_SNAKE_CASE__ : Tuple = 1 SCREAMING_SNAKE_CASE__ : Optional[int] = 2 SCREAMING_SNAKE_CASE__ : List[str] = 3 SCREAMING_SNAKE_CASE__ : Optional[int] = 4 class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Dict = VOCAB_FILES_NAMES lowerCamelCase_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase_ : List[str] = """left""" def __init__( self , UpperCamelCase__ , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__="<s>" , UpperCamelCase__="</s>" , UpperCamelCase__="<unk>" , UpperCamelCase__="<sep>" , UpperCamelCase__="<pad>" , UpperCamelCase__="<cls>" , UpperCamelCase__="<mask>" , UpperCamelCase__=["<eop>", "<eod>"] , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> None: # Mask token behave like a normal word, i.e. include the space before it lowerCamelCase : str = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token lowerCamelCase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , ) lowerCamelCase : Any = 3 lowerCamelCase : Optional[Any] = do_lower_case lowerCamelCase : List[Any] = remove_space lowerCamelCase : str = keep_accents lowerCamelCase : List[Any] = vocab_file lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCamelCase__ ) @property def _lowercase ( self ) -> Optional[Any]: return len(self.sp_model ) def _lowercase ( self ) -> Optional[int]: lowerCamelCase : int = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Optional[Any]: lowerCamelCase : Optional[int] = self.__dict__.copy() lowerCamelCase : Union[str, Any] = None return state def __setstate__( self , UpperCamelCase__ ) -> int: lowerCamelCase : int = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): lowerCamelCase : Any = {} lowerCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _lowercase ( self , UpperCamelCase__ ) -> Any: if self.remove_space: lowerCamelCase : Dict = " ".join(inputs.strip().split() ) else: lowerCamelCase : Union[str, Any] = inputs lowerCamelCase : Optional[Any] = outputs.replace("``" , "\"" ).replace("''" , "\"" ) if not self.keep_accents: lowerCamelCase : Optional[int] = unicodedata.normalize("NFKD" , UpperCamelCase__ ) lowerCamelCase : List[Any] = "".join([c for c in outputs if not unicodedata.combining(UpperCamelCase__ )] ) if self.do_lower_case: lowerCamelCase : List[str] = outputs.lower() return outputs def _lowercase ( self , UpperCamelCase__ ) -> List[str]: lowerCamelCase : Optional[Any] = self.preprocess_text(UpperCamelCase__ ) lowerCamelCase : Dict = self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ ) lowerCamelCase : Dict = [] for piece in pieces: if len(UpperCamelCase__ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit(): lowerCamelCase : List[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase__ , "" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: lowerCamelCase : Union[str, Any] = cur_pieces[1:] else: lowerCamelCase : Optional[int] = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(UpperCamelCase__ ) else: new_pieces.append(UpperCamelCase__ ) return new_pieces def _lowercase ( self , UpperCamelCase__ ) -> int: return self.sp_model.PieceToId(UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ ) -> Tuple: return self.sp_model.IdToPiece(UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ ) -> List[str]: lowerCamelCase : Union[str, Any] = "".join(UpperCamelCase__ ).replace(UpperCamelCase__ , " " ).strip() return out_string def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = True , **UpperCamelCase__ , ) -> str: lowerCamelCase : Optional[int] = kwargs.pop("use_source_tokenizer" , UpperCamelCase__ ) lowerCamelCase : Optional[int] = self.convert_ids_to_tokens(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. https://github.com/huggingface/transformers/issues/1133 lowerCamelCase : Any = [] lowerCamelCase : Any = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(UpperCamelCase__ ) ) lowerCamelCase : int = [] sub_texts.append(UpperCamelCase__ ) else: current_sub_text.append(UpperCamelCase__ ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(UpperCamelCase__ ) ) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens lowerCamelCase : Union[str, Any] = "".join(UpperCamelCase__ ) lowerCamelCase : Tuple = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: lowerCamelCase : int = self.clean_up_tokenization(UpperCamelCase__ ) return clean_text else: return text def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]: lowerCamelCase : str = [self.sep_token_id] lowerCamelCase : Optional[int] = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ ) if token_ids_a is not None: return ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1, 1] return ([0] * len(UpperCamelCase__ )) + [1, 1] def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]: lowerCamelCase : Any = [self.sep_token_id] lowerCamelCase : List[str] = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]: if not os.path.isdir(UpperCamelCase__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCamelCase : Union[str, Any] = os.path.join( UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase__ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase__ , "wb" ) as fi: lowerCamelCase : str = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase__ ) return (out_vocab_file,)
48
1
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_fnet import FNetTokenizer else: SCREAMING_SNAKE_CASE__ : str = None SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : List[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} SCREAMING_SNAKE_CASE__ : Any = { 'vocab_file': { 'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model', 'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model', }, 'tokenizer_file': { 'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json', 'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json', }, } SCREAMING_SNAKE_CASE__ : Tuple = { 'google/fnet-base': 512, 'google/fnet-large': 512, } SCREAMING_SNAKE_CASE__ : Union[str, Any] = '▁' class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : int = VOCAB_FILES_NAMES lowerCamelCase_ : Tuple = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase_ : Optional[int] = ["""input_ids""", """token_type_ids"""] lowerCamelCase_ : List[str] = FNetTokenizer def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__="<unk>" , UpperCamelCase__="[SEP]" , UpperCamelCase__="<pad>" , UpperCamelCase__="[CLS]" , UpperCamelCase__="[MASK]" , **UpperCamelCase__ , ) -> str: # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. lowerCamelCase : Any = ( AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ , normalized=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token ) super().__init__( UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , **UpperCamelCase__ , ) lowerCamelCase : Optional[Any] = do_lower_case lowerCamelCase : Union[str, Any] = remove_space lowerCamelCase : str = keep_accents lowerCamelCase : Optional[Any] = vocab_file lowerCamelCase : List[Any] = False if not self.vocab_file else True def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]: lowerCamelCase : int = [self.sep_token_id] lowerCamelCase : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]: lowerCamelCase : List[str] = [self.sep_token_id] lowerCamelCase : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]: if not os.path.isdir(UpperCamelCase__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCamelCase : int = os.path.join( UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ): copyfile(self.vocab_file , UpperCamelCase__ ) return (out_vocab_file,)
48
import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Tuple = { 'b0': efficientnet.EfficientNetBa, 'b1': efficientnet.EfficientNetBa, 'b2': efficientnet.EfficientNetBa, 'b3': efficientnet.EfficientNetBa, 'b4': efficientnet.EfficientNetBa, 'b5': efficientnet.EfficientNetBa, 'b6': efficientnet.EfficientNetBa, 'b7': efficientnet.EfficientNetBa, } SCREAMING_SNAKE_CASE__ : Any = { 'b0': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.0, 'image_size': 224, 'dropout_rate': 0.2, 'dw_padding': [], }, 'b1': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.1, 'image_size': 240, 'dropout_rate': 0.2, 'dw_padding': [16], }, 'b2': { 'hidden_dim': 1408, 'width_coef': 1.1, 'depth_coef': 1.2, 'image_size': 260, 'dropout_rate': 0.3, 'dw_padding': [5, 8, 16], }, 'b3': { 'hidden_dim': 1536, 'width_coef': 1.2, 'depth_coef': 1.4, 'image_size': 300, 'dropout_rate': 0.3, 'dw_padding': [5, 18], }, 'b4': { 'hidden_dim': 1792, 'width_coef': 1.4, 'depth_coef': 1.8, 'image_size': 380, 'dropout_rate': 0.4, 'dw_padding': [6], }, 'b5': { 'hidden_dim': 2048, 'width_coef': 1.6, 'depth_coef': 2.2, 'image_size': 456, 'dropout_rate': 0.4, 'dw_padding': [13, 27], }, 'b6': { 'hidden_dim': 2304, 'width_coef': 1.8, 'depth_coef': 2.6, 'image_size': 528, 'dropout_rate': 0.5, 'dw_padding': [31], }, 'b7': { 'hidden_dim': 2560, 'width_coef': 2.0, 'depth_coef': 3.1, 'image_size': 600, 'dropout_rate': 0.5, 'dw_padding': [18], }, } def A ( _SCREAMING_SNAKE_CASE ) -> str: lowerCamelCase : int = EfficientNetConfig() lowerCamelCase : List[str] = CONFIG_MAP[model_name]["hidden_dim"] lowerCamelCase : List[str] = CONFIG_MAP[model_name]["width_coef"] lowerCamelCase : Any = CONFIG_MAP[model_name]["depth_coef"] lowerCamelCase : Union[str, Any] = CONFIG_MAP[model_name]["image_size"] lowerCamelCase : Optional[int] = CONFIG_MAP[model_name]["dropout_rate"] lowerCamelCase : str = CONFIG_MAP[model_name]["dw_padding"] lowerCamelCase : Tuple = "huggingface/label-files" lowerCamelCase : List[str] = "imagenet-1k-id2label.json" lowerCamelCase : Any = 1000 lowerCamelCase : Any = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,repo_type="dataset" ) ,"r" ) ) lowerCamelCase : List[str] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowerCamelCase : Tuple = idalabel lowerCamelCase : Any = {v: k for k, v in idalabel.items()} return config def A ( ) -> int: lowerCamelCase : str = "http://images.cocodataset.org/val2017/000000039769.jpg" lowerCamelCase : Tuple = Image.open(requests.get(_SCREAMING_SNAKE_CASE ,stream=_SCREAMING_SNAKE_CASE ).raw ) return im def A ( _SCREAMING_SNAKE_CASE ) -> str: lowerCamelCase : List[Any] = CONFIG_MAP[model_name]["image_size"] lowerCamelCase : str = EfficientNetImageProcessor( size={"height": size, "width": size} ,image_mean=[0.485, 0.456, 0.406] ,image_std=[0.47853944, 0.4732864, 0.47434163] ,do_center_crop=_SCREAMING_SNAKE_CASE ,) return preprocessor def A ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: lowerCamelCase : Any = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )] lowerCamelCase : Any = sorted(set(_SCREAMING_SNAKE_CASE ) ) lowerCamelCase : Dict = len(_SCREAMING_SNAKE_CASE ) lowerCamelCase : List[Any] = {b: str(_SCREAMING_SNAKE_CASE ) for b, i in zip(_SCREAMING_SNAKE_CASE ,range(_SCREAMING_SNAKE_CASE ) )} lowerCamelCase : List[Any] = [] rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") ) rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") ) rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") ) rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") ) rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") ) for b in block_names: lowerCamelCase : Dict = block_name_mapping[b] rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") ) rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") ) rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") ) rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") ) rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") ) lowerCamelCase : Optional[int] = {} for item in rename_keys: if item[0] in original_param_names: lowerCamelCase : List[str] = "efficientnet." + item[1] lowerCamelCase : int = "classifier.weight" lowerCamelCase : Union[str, Any] = "classifier.bias" return key_mapping def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Dict: for key, value in tf_params.items(): if "normalization" in key: continue lowerCamelCase : Tuple = key_mapping[key] if "_conv" in key and "kernel" in key: lowerCamelCase : List[Any] = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(3 ,2 ,0 ,1 ) elif "depthwise_kernel" in key: lowerCamelCase : int = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(2 ,3 ,0 ,1 ) elif "kernel" in key: lowerCamelCase : List[str] = torch.from_numpy(np.transpose(_SCREAMING_SNAKE_CASE ) ) else: lowerCamelCase : Optional[Any] = torch.from_numpy(_SCREAMING_SNAKE_CASE ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_SCREAMING_SNAKE_CASE ) @torch.no_grad() def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[int]: lowerCamelCase : Optional[int] = model_classes[model_name]( include_top=_SCREAMING_SNAKE_CASE ,weights="imagenet" ,input_tensor=_SCREAMING_SNAKE_CASE ,input_shape=_SCREAMING_SNAKE_CASE ,pooling=_SCREAMING_SNAKE_CASE ,classes=1000 ,classifier_activation="softmax" ,) lowerCamelCase : List[Any] = original_model.trainable_variables lowerCamelCase : Tuple = original_model.non_trainable_variables lowerCamelCase : Union[str, Any] = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: lowerCamelCase : List[str] = param.numpy() lowerCamelCase : int = list(tf_params.keys() ) # Load HuggingFace model lowerCamelCase : Union[str, Any] = get_efficientnet_config(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Optional[int] = EfficientNetForImageClassification(_SCREAMING_SNAKE_CASE ).eval() lowerCamelCase : Tuple = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("Converting parameters..." ) lowerCamelCase : Union[str, Any] = rename_keys(_SCREAMING_SNAKE_CASE ) replace_params(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # Initialize preprocessor and preprocess input image lowerCamelCase : int = convert_image_processor(_SCREAMING_SNAKE_CASE ) lowerCamelCase : int = preprocessor(images=prepare_img() ,return_tensors="pt" ) # HF model inference hf_model.eval() with torch.no_grad(): lowerCamelCase : Optional[Any] = hf_model(**_SCREAMING_SNAKE_CASE ) lowerCamelCase : str = outputs.logits.detach().numpy() # Original model inference lowerCamelCase : Optional[Any] = False lowerCamelCase : Any = CONFIG_MAP[model_name]["image_size"] lowerCamelCase : Optional[int] = prepare_img().resize((image_size, image_size) ,resample=PIL.Image.NEAREST ) lowerCamelCase : Union[str, Any] = image.img_to_array(_SCREAMING_SNAKE_CASE ) lowerCamelCase : str = np.expand_dims(_SCREAMING_SNAKE_CASE ,axis=0 ) lowerCamelCase : Dict = original_model.predict(_SCREAMING_SNAKE_CASE ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,atol=1e-3 ), "The predicted logits are not the same." print("Model outputs match!" ) if save_model: # Create folder to save model if not os.path.isdir(_SCREAMING_SNAKE_CASE ): os.mkdir(_SCREAMING_SNAKE_CASE ) # Save converted model and image processor hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) preprocessor.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: # Push model and image processor to hub print(f'''Pushing converted {model_name} to the hub...''' ) lowerCamelCase : int = f'''efficientnet-{model_name}''' preprocessor.push_to_hub(_SCREAMING_SNAKE_CASE ) hf_model.push_to_hub(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='b0', type=str, help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].', ) parser.add_argument( '--pytorch_dump_folder_path', default='hf_model', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--save_model', action='store_true', help='Save model to local') parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
48
1
import tempfile import unittest import numpy as np from diffusers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionPipeline, PNDMScheduler, ) from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class UpperCamelCase__ (lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase_ : Union[str, Any] = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline""" def _lowercase ( self , UpperCamelCase__=0 ) -> Dict: lowerCamelCase : List[Any] = np.random.RandomState(UpperCamelCase__ ) lowerCamelCase : Optional[Any] = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def _lowercase ( self ) -> Union[str, Any]: lowerCamelCase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowerCamelCase : Dict = self.get_dummy_inputs() lowerCamelCase : Optional[Any] = pipe(**UpperCamelCase__ ).images lowerCamelCase : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) lowerCamelCase : str = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _lowercase ( self ) -> Optional[int]: lowerCamelCase : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) lowerCamelCase : Any = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowerCamelCase : Any = self.get_dummy_inputs() lowerCamelCase : int = pipe(**UpperCamelCase__ ).images lowerCamelCase : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) lowerCamelCase : Any = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _lowercase ( self ) -> List[Any]: lowerCamelCase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) lowerCamelCase : Union[str, Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowerCamelCase : Dict = self.get_dummy_inputs() lowerCamelCase : Any = pipe(**UpperCamelCase__ ).images lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) lowerCamelCase : List[Any] = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _lowercase ( self ) -> int: lowerCamelCase : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) lowerCamelCase : List[str] = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowerCamelCase : List[str] = self.get_dummy_inputs() lowerCamelCase : str = pipe(**UpperCamelCase__ ).images lowerCamelCase : str = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) lowerCamelCase : Optional[Any] = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _lowercase ( self ) -> int: lowerCamelCase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) lowerCamelCase : Union[str, Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowerCamelCase : List[str] = self.get_dummy_inputs() lowerCamelCase : str = pipe(**UpperCamelCase__ ).images lowerCamelCase : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) lowerCamelCase : Optional[Any] = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _lowercase ( self ) -> Union[str, Any]: lowerCamelCase : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) lowerCamelCase : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowerCamelCase : int = self.get_dummy_inputs() lowerCamelCase : Union[str, Any] = pipe(**UpperCamelCase__ ).images lowerCamelCase : int = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) lowerCamelCase : str = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _lowercase ( self ) -> List[Any]: lowerCamelCase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowerCamelCase : Any = self.get_dummy_inputs() lowerCamelCase : Optional[int] = 3 * [inputs["prompt"]] # forward lowerCamelCase : str = pipe(**UpperCamelCase__ ) lowerCamelCase : Optional[Any] = output.images[0, -3:, -3:, -1] lowerCamelCase : Optional[Any] = self.get_dummy_inputs() lowerCamelCase : Optional[Any] = 3 * [inputs.pop("prompt" )] lowerCamelCase : Any = pipe.tokenizer( UpperCamelCase__ , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=UpperCamelCase__ , return_tensors="np" , ) lowerCamelCase : Optional[int] = text_inputs["input_ids"] lowerCamelCase : Dict = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] lowerCamelCase : str = prompt_embeds # forward lowerCamelCase : Tuple = pipe(**UpperCamelCase__ ) lowerCamelCase : Tuple = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4 def _lowercase ( self ) -> Tuple: lowerCamelCase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowerCamelCase : Optional[int] = self.get_dummy_inputs() lowerCamelCase : Any = 3 * ["this is a negative prompt"] lowerCamelCase : str = negative_prompt lowerCamelCase : Tuple = 3 * [inputs["prompt"]] # forward lowerCamelCase : Dict = pipe(**UpperCamelCase__ ) lowerCamelCase : int = output.images[0, -3:, -3:, -1] lowerCamelCase : List[Any] = self.get_dummy_inputs() lowerCamelCase : Tuple = 3 * [inputs.pop("prompt" )] lowerCamelCase : Tuple = [] for p in [prompt, negative_prompt]: lowerCamelCase : int = pipe.tokenizer( UpperCamelCase__ , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=UpperCamelCase__ , return_tensors="np" , ) lowerCamelCase : Optional[int] = text_inputs["input_ids"] embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] ) lowerCamelCase , lowerCamelCase : List[str] = embeds # forward lowerCamelCase : Optional[int] = pipe(**UpperCamelCase__ ) lowerCamelCase : Optional[Any] = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4 @nightly @require_onnxruntime @require_torch_gpu class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @property def _lowercase ( self ) -> int: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _lowercase ( self ) -> Optional[int]: lowerCamelCase : str = ort.SessionOptions() lowerCamelCase : str = False return options def _lowercase ( self ) -> Optional[Any]: # using the PNDM scheduler by default lowerCamelCase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowerCamelCase : Any = "A painting of a squirrel eating a burger" np.random.seed(0 ) lowerCamelCase : Tuple = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="np" ) lowerCamelCase : str = output.images lowerCamelCase : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCamelCase : Tuple = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _lowercase ( self ) -> Optional[int]: lowerCamelCase : List[Any] = DDIMScheduler.from_pretrained( "runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" ) lowerCamelCase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = "open neural network exchange" lowerCamelCase : Tuple = np.random.RandomState(0 ) lowerCamelCase : str = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCamelCase__ , output_type="np" ) lowerCamelCase : Tuple = output.images lowerCamelCase : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCamelCase : Dict = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _lowercase ( self ) -> Any: lowerCamelCase : Any = LMSDiscreteScheduler.from_pretrained( "runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" ) lowerCamelCase : Any = OnnxStableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowerCamelCase : int = "open neural network exchange" lowerCamelCase : Optional[Any] = np.random.RandomState(0 ) lowerCamelCase : int = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCamelCase__ , output_type="np" ) lowerCamelCase : Optional[Any] = output.images lowerCamelCase : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCamelCase : List[Any] = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _lowercase ( self ) -> int: lowerCamelCase : List[str] = 0 def test_callback_fn(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> None: lowerCamelCase : Dict = True nonlocal number_of_steps number_of_steps += 1 if step == 0: assert latents.shape == (1, 4, 64, 64) lowerCamelCase : int = latents[0, -3:, -3:, -1] lowerCamelCase : Optional[int] = np.array( [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3 elif step == 5: assert latents.shape == (1, 4, 64, 64) lowerCamelCase : Union[str, Any] = latents[0, -3:, -3:, -1] lowerCamelCase : List[str] = np.array( [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3 lowerCamelCase : List[str] = False lowerCamelCase : str = OnnxStableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , revision="onnx" , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = "Andromeda galaxy in a bottle" lowerCamelCase : List[str] = np.random.RandomState(0 ) pipe( prompt=UpperCamelCase__ , num_inference_steps=5 , guidance_scale=7.5 , generator=UpperCamelCase__ , callback=UpperCamelCase__ , callback_steps=1 , ) assert test_callback_fn.has_been_called assert number_of_steps == 6 def _lowercase ( self ) -> str: lowerCamelCase : int = OnnxStableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , revision="onnx" , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) assert pipe.safety_checker is None lowerCamelCase : Optional[int] = pipe("example prompt" , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(UpperCamelCase__ ) lowerCamelCase : int = OnnxStableDiffusionPipeline.from_pretrained(UpperCamelCase__ ) # sanity check that the pipeline still works assert pipe.safety_checker is None lowerCamelCase : Tuple = pipe("example prompt" , num_inference_steps=2 ).images[0] assert image is not None
48
import argparse from pathlib import Path from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,) -> List[str]: if config_name_or_path is None: lowerCamelCase : Any = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base" if generator_tokenizer_name_or_path is None: lowerCamelCase : Dict = generator_name_or_path if question_encoder_tokenizer_name_or_path is None: lowerCamelCase : Any = question_encoder_name_or_path lowerCamelCase : str = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration # Save model. lowerCamelCase : List[Any] = RagConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Optional[Any] = gen_config lowerCamelCase : Optional[Any] = question_encoder_config lowerCamelCase : List[Any] = model_class.from_pretrained_question_encoder_generator( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,config=_SCREAMING_SNAKE_CASE ) rag_model.save_pretrained(_SCREAMING_SNAKE_CASE ) # Sanity check. model_class.from_pretrained(_SCREAMING_SNAKE_CASE ) # Save tokenizers. lowerCamelCase : List[str] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" ) lowerCamelCase : int = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Any = argparse.ArgumentParser() parser.add_argument( '--model_type', choices=['rag_sequence', 'rag_token'], required=True, type=str, help='RAG model type: rag_sequence, rag_token', ) parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.') parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier') parser.add_argument( '--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier' ) parser.add_argument( '--generator_tokenizer_name_or_path', type=str, help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``', ) parser.add_argument( '--question_encoder_tokenizer_name_or_path', type=str, help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``', ) parser.add_argument( '--config_name_or_path', type=str, help=( 'Identifier of the model config to use, if not provided, resolves to a base config for a given' ' ``model_type``' ), ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args() SCREAMING_SNAKE_CASE__ : Optional[Any] = Path(args.dest) dest_dir.mkdir(exist_ok=True) consolidate( args.model_type, args.generator_name_or_path, args.question_encoder_name_or_path, dest_dir, args.config_name_or_path, args.generator_tokenizer_name_or_path, args.question_encoder_tokenizer_name_or_path, )
48
1
import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class UpperCamelCase__ (lowerCAmelCase__ , lowerCAmelCase__ ): '''simple docstring''' @register_to_config def __init__( self , *, UpperCamelCase__ = 4 , UpperCamelCase__ = 768 , UpperCamelCase__ , UpperCamelCase__ , ) -> Union[str, Any]: super().__init__() lowerCamelCase : List[str] = nn.Parameter(torch.zeros(UpperCamelCase__ ) ) # parameters for additional clip time embeddings lowerCamelCase : Any = nn.Linear(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase : Any = nn.Linear(UpperCamelCase__ , UpperCamelCase__ ) # parameters for encoder hidden states lowerCamelCase : Dict = clip_extra_context_tokens lowerCamelCase : Tuple = nn.Linear( UpperCamelCase__ , self.clip_extra_context_tokens * cross_attention_dim ) lowerCamelCase : str = nn.Linear(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase : Dict = nn.LayerNorm(UpperCamelCase__ ) def _lowercase ( self , *, UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Any: if do_classifier_free_guidance: # Add the classifier free guidance embeddings to the image embeddings lowerCamelCase : Union[str, Any] = image_embeddings.shape[0] lowerCamelCase : Optional[Any] = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 ) lowerCamelCase : Optional[Any] = classifier_free_guidance_embeddings.expand( UpperCamelCase__ , -1 ) lowerCamelCase : Any = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 ) # The image embeddings batch size and the text embeddings batch size are equal assert image_embeddings.shape[0] == prompt_embeds.shape[0] lowerCamelCase : Union[str, Any] = prompt_embeds.shape[0] # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and # adding CLIP embeddings to the existing timestep embedding, ... lowerCamelCase : str = self.embedding_proj(UpperCamelCase__ ) lowerCamelCase : str = self.clip_image_embeddings_project_to_time_embeddings(UpperCamelCase__ ) lowerCamelCase : Any = time_projected_image_embeddings + time_projected_prompt_embeds # ... and by projecting CLIP embeddings into four # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder" lowerCamelCase : str = self.clip_extra_context_tokens_proj(UpperCamelCase__ ) lowerCamelCase : Any = clip_extra_context_tokens.reshape(UpperCamelCase__ , -1 , self.clip_extra_context_tokens ) lowerCamelCase : Optional[int] = clip_extra_context_tokens.permute(0 , 2 , 1 ) lowerCamelCase : Optional[Any] = self.encoder_hidden_states_proj(UpperCamelCase__ ) lowerCamelCase : List[str] = self.text_encoder_hidden_states_norm(UpperCamelCase__ ) lowerCamelCase : Optional[int] = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 ) return text_encoder_hidden_states, additive_clip_time_embeddings
48
import math def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> float: if ( not isinstance(_SCREAMING_SNAKE_CASE ,(int, float) ) or power_factor < -1 or power_factor > 1 ): raise ValueError("power_factor must be a valid float value between -1 and 1." ) return apparent_power * power_factor def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> float: if ( not isinstance(_SCREAMING_SNAKE_CASE ,(int, float) ) or power_factor < -1 or power_factor > 1 ): raise ValueError("power_factor must be a valid float value between -1 and 1." ) return apparent_power * math.sqrt(1 - power_factor**2 ) if __name__ == "__main__": import doctest doctest.testmod()
48
1
# Algorithm for the pigeonhole sorting def A ( _SCREAMING_SNAKE_CASE ) -> List[Any]: lowerCamelCase : Any = min(_SCREAMING_SNAKE_CASE ) # min() finds the minimum value lowerCamelCase : Union[str, Any] = max(_SCREAMING_SNAKE_CASE ) # max() finds the maximum value lowerCamelCase : int = max_val - min_val + 1 # size is difference of max and min values plus one # list of pigeonholes of size equal to the variable size lowerCamelCase : Any = [0] * size # Populate the pigeonholes. for x in a: assert isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ), "integers only please" holes[x - min_val] += 1 # Putting the elements back into the array in an order. lowerCamelCase : List[str] = 0 for count in range(_SCREAMING_SNAKE_CASE ): while holes[count] > 0: holes[count] -= 1 lowerCamelCase : int = count + min_val i += 1 def A ( ) -> Optional[Any]: lowerCamelCase : Optional[Any] = [8, 3, 2, 7, 4, 6, 8] pigeonhole_sort(_SCREAMING_SNAKE_CASE ) print("Sorted order is:" ," ".join(_SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": main()
48
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__) def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ) -> Any: lowerCamelCase : Any = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ("cls_token", "deit.embeddings.cls_token"), ("dist_token", "deit.embeddings.distillation_token"), ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"), ("pos_embed", "deit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "deit" from all keys that start with "deit" lowerCamelCase : Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ("norm.weight", "deit.layernorm.weight"), ("norm.bias", "deit.layernorm.bias"), ("head.weight", "cls_classifier.weight"), ("head.bias", "cls_classifier.bias"), ("head_dist.weight", "distillation_classifier.weight"), ("head_dist.bias", "distillation_classifier.bias"), ] ) return rename_keys def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ) -> str: for i in range(config.num_hidden_layers ): if base_model: lowerCamelCase : Optional[int] = "" else: lowerCamelCase : List[str] = "deit." # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCamelCase : List[str] = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) lowerCamelCase : Optional[int] = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict lowerCamelCase : List[Any] = in_proj_weight[ : config.hidden_size, : ] lowerCamelCase : Any = in_proj_bias[: config.hidden_size] lowerCamelCase : List[str] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCamelCase : Optional[Any] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCamelCase : List[str] = in_proj_weight[ -config.hidden_size :, : ] lowerCamelCase : List[Any] = in_proj_bias[-config.hidden_size :] def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> str: lowerCamelCase : List[str] = dct.pop(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Any = val def A ( ) -> List[str]: lowerCamelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" lowerCamelCase : str = Image.open(requests.get(_SCREAMING_SNAKE_CASE ,stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]: lowerCamelCase : Union[str, Any] = DeiTConfig() # all deit models have fine-tuned heads lowerCamelCase : Optional[int] = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size lowerCamelCase : Dict = 1000 lowerCamelCase : Tuple = "huggingface/label-files" lowerCamelCase : List[str] = "imagenet-1k-id2label.json" lowerCamelCase : List[Any] = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,repo_type="dataset" ) ,"r" ) ) lowerCamelCase : Optional[int] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowerCamelCase : Tuple = idalabel lowerCamelCase : str = {v: k for k, v in idalabel.items()} lowerCamelCase : Dict = int(deit_name[-6:-4] ) lowerCamelCase : Optional[Any] = int(deit_name[-3:] ) # size of the architecture if deit_name[9:].startswith("tiny" ): lowerCamelCase : Optional[Any] = 192 lowerCamelCase : List[str] = 768 lowerCamelCase : Tuple = 12 lowerCamelCase : Optional[Any] = 3 elif deit_name[9:].startswith("small" ): lowerCamelCase : str = 384 lowerCamelCase : Optional[Any] = 1536 lowerCamelCase : Dict = 12 lowerCamelCase : Optional[int] = 6 if deit_name[9:].startswith("base" ): pass elif deit_name[4:].startswith("large" ): lowerCamelCase : str = 1024 lowerCamelCase : List[str] = 4096 lowerCamelCase : Any = 24 lowerCamelCase : Dict = 16 # load original model from timm lowerCamelCase : List[Any] = timm.create_model(_SCREAMING_SNAKE_CASE ,pretrained=_SCREAMING_SNAKE_CASE ) timm_model.eval() # load state_dict of original model, remove and rename some keys lowerCamelCase : Dict = timm_model.state_dict() lowerCamelCase : Dict = create_rename_keys(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) read_in_q_k_v(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # load HuggingFace model lowerCamelCase : Optional[Any] = DeiTForImageClassificationWithTeacher(_SCREAMING_SNAKE_CASE ).eval() model.load_state_dict(_SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by DeiTImageProcessor lowerCamelCase : Any = int( (256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 lowerCamelCase : Union[str, Any] = DeiTImageProcessor(size=_SCREAMING_SNAKE_CASE ,crop_size=config.image_size ) lowerCamelCase : str = image_processor(images=prepare_img() ,return_tensors="pt" ) lowerCamelCase : int = encoding["pixel_values"] lowerCamelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Union[str, Any] = timm_model(_SCREAMING_SNAKE_CASE ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_SCREAMING_SNAKE_CASE ,outputs.logits ,atol=1e-3 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(f'''Saving model {deit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--deit_name', default='vit_deit_base_distilled_patch16_224', type=str, help='Name of the DeiT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
48
1
import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : List[Any] = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } SCREAMING_SNAKE_CASE__ : Tuple = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]: for attribute in key.split("." ): lowerCamelCase : Dict = getattr(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) if weight_type is not None: lowerCamelCase : List[str] = getattr(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).shape else: lowerCamelCase : Union[str, Any] = hf_pointer.shape assert hf_shape == value.shape, ( f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": lowerCamelCase : Optional[Any] = value elif weight_type == "weight_g": lowerCamelCase : Optional[Any] = value elif weight_type == "weight_v": lowerCamelCase : str = value elif weight_type == "bias": lowerCamelCase : Union[str, Any] = value else: lowerCamelCase : Dict = value logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Dict: lowerCamelCase : Optional[int] = [] lowerCamelCase : Optional[Any] = fairseq_model.state_dict() lowerCamelCase : Dict = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight lowerCamelCase : str = None for name, value in fairseq_dict.items(): lowerCamelCase : str = False if "conv_layers" in name: load_conv_layer( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,hf_model.config.feat_extract_norm == "group" ,) lowerCamelCase : Union[str, Any] = True elif name.split("." )[0] == "proj": lowerCamelCase : List[str] = fairseq_model.proj lowerCamelCase : Optional[Any] = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: lowerCamelCase : int = True if "*" in mapped_key: lowerCamelCase : Union[str, Any] = name.split(_SCREAMING_SNAKE_CASE )[0].split("." )[-2] lowerCamelCase : List[str] = mapped_key.replace("*" ,_SCREAMING_SNAKE_CASE ) if "weight_g" in name: lowerCamelCase : Optional[int] = "weight_g" elif "weight_v" in name: lowerCamelCase : int = "weight_v" elif "bias" in name: lowerCamelCase : int = "bias" elif "weight" in name: lowerCamelCase : List[str] = "weight" else: lowerCamelCase : Dict = None set_recursively(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) continue if not is_used: unused_weights.append(_SCREAMING_SNAKE_CASE ) logger.warning(f'''Unused weights: {unused_weights}''' ) return proj_weight def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> str: lowerCamelCase : List[Any] = full_name.split("conv_layers." )[-1] lowerCamelCase : Any = name.split("." ) lowerCamelCase : int = int(items[0] ) lowerCamelCase : Tuple = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) lowerCamelCase : int = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) lowerCamelCase : Dict = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." ) lowerCamelCase : Dict = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) lowerCamelCase : List[Any] = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(_SCREAMING_SNAKE_CASE ) def A ( _SCREAMING_SNAKE_CASE ) -> str: lowerCamelCase , lowerCamelCase : List[str] = emb.weight.shape lowerCamelCase : int = nn.Linear(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,bias=_SCREAMING_SNAKE_CASE ) lowerCamelCase : Optional[int] = emb.weight.data return lin_layer def A ( _SCREAMING_SNAKE_CASE ) -> Dict: with open(_SCREAMING_SNAKE_CASE ,"r" ,encoding="utf-8" ) as f: lowerCamelCase : Union[str, Any] = f.readlines() lowerCamelCase : Optional[int] = [line.split(" " )[0] for line in lines] lowerCamelCase : Dict = len(_SCREAMING_SNAKE_CASE ) lowerCamelCase : List[str] = { "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, } vocab_dict.update(dict(zip(_SCREAMING_SNAKE_CASE ,range(4 ,num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> Optional[int]: lowerCamelCase : Dict = WavaVecaConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Tuple = SpeechaTextaConfig.from_pretrained( _SCREAMING_SNAKE_CASE ,vocab_size=_SCREAMING_SNAKE_CASE ,decoder_layers=_SCREAMING_SNAKE_CASE ,do_stable_layer_norm=_SCREAMING_SNAKE_CASE ) lowerCamelCase : str = WavaVecaFeatureExtractor( feature_size=1 ,sampling_rate=1_6000 ,padding_value=0 ,do_normalize=_SCREAMING_SNAKE_CASE ,return_attention_mask=_SCREAMING_SNAKE_CASE ,) lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] ,arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) lowerCamelCase : int = model[0].eval() # set weights for wav2vec2 encoder lowerCamelCase : str = WavaVecaModel(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Dict = recursively_load_weights_wavaveca(model.encoder ,_SCREAMING_SNAKE_CASE ) lowerCamelCase : Any = SpeechaTextaForCausalLM(_SCREAMING_SNAKE_CASE ) lowerCamelCase , lowerCamelCase : List[Any] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() ,strict=_SCREAMING_SNAKE_CASE ) # set output linear layer unexpected_keys.remove("embed_out" ) lowerCamelCase : Dict = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''' ) logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' ) lowerCamelCase : str = SpeechEncoderDecoderModel(encoder=_SCREAMING_SNAKE_CASE ,decoder=_SCREAMING_SNAKE_CASE ) lowerCamelCase : Optional[int] = False # add projection layer lowerCamelCase : List[Any] = nn.Parameter(projection_layer.weight ) lowerCamelCase : Tuple = nn.Parameter(projection_layer.bias ) lowerCamelCase : Dict = create_vocab_dict(_SCREAMING_SNAKE_CASE ) with open(os.path.join(_SCREAMING_SNAKE_CASE ,"vocab.json" ) ,"w" ) as fp: json.dump(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) lowerCamelCase : Dict = SpeechaTextaTokenizer(os.path.join(_SCREAMING_SNAKE_CASE ,"vocab.json" ) ) tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Optional[Any] = hf_wavavec.config.to_dict() lowerCamelCase : str = tokenizer.pad_token_id lowerCamelCase : List[Any] = tokenizer.bos_token_id lowerCamelCase : List[Any] = tokenizer.eos_token_id lowerCamelCase : str = "speech_to_text_2" lowerCamelCase : Dict = "wav2vec2" lowerCamelCase : Dict = SpeechEncoderDecoderConfig.from_dict(_SCREAMING_SNAKE_CASE ) hf_wavavec.save_pretrained(_SCREAMING_SNAKE_CASE ) feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument( '--encoder_config_path', default='facebook/wav2vec2-large-lv60', type=str, help='Path to hf encoder wav2vec2 checkpoint config', ) parser.add_argument( '--decoder_config_path', default='facebook/s2t-small-mustc-en-fr-st', type=str, help='Path to hf decoder s2t checkpoint config', ) parser.add_argument('--vocab_size', default=10224, type=int, help='Vocab size of decoder') parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers') SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
48
import random def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> tuple: lowerCamelCase , lowerCamelCase , lowerCamelCase : Any = [], [], [] for element in data: if element < pivot: less.append(_SCREAMING_SNAKE_CASE ) elif element > pivot: greater.append(_SCREAMING_SNAKE_CASE ) else: equal.append(_SCREAMING_SNAKE_CASE ) return less, equal, greater def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> str: # index = len(items) // 2 when trying to find the median # (value of index when items is sorted) # invalid input if index >= len(_SCREAMING_SNAKE_CASE ) or index < 0: return None lowerCamelCase : List[Any] = items[random.randint(0 ,len(_SCREAMING_SNAKE_CASE ) - 1 )] lowerCamelCase : Dict = 0 lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = _partition(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) lowerCamelCase : Union[str, Any] = len(_SCREAMING_SNAKE_CASE ) lowerCamelCase : str = len(_SCREAMING_SNAKE_CASE ) # index is the pivot if m <= index < m + count: return pivot # must be in smaller elif m > index: return quick_select(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # must be in larger else: return quick_select(_SCREAMING_SNAKE_CASE ,index - (m + count) )
48
1
import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE__ : Optional[Any] = get_tests_dir('fixtures/test_sentencepiece.model') SCREAMING_SNAKE_CASE__ : Dict = {'target_lang': 'fi', 'source_lang': 'en'} SCREAMING_SNAKE_CASE__ : Any = '>>zh<<' SCREAMING_SNAKE_CASE__ : int = 'Helsinki-NLP/' if is_torch_available(): SCREAMING_SNAKE_CASE__ : Optional[int] = 'pt' elif is_tf_available(): SCREAMING_SNAKE_CASE__ : str = 'tf' else: SCREAMING_SNAKE_CASE__ : Dict = 'jax' @require_sentencepiece class UpperCamelCase__ (lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase_ : Any = MarianTokenizer lowerCamelCase_ : Tuple = False lowerCamelCase_ : List[str] = True def _lowercase ( self ) -> Union[str, Any]: super().setUp() lowerCamelCase : Tuple = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"] lowerCamelCase : Optional[int] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) ) lowerCamelCase : int = Path(self.tmpdirname ) save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["vocab"] ) save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["source_spm"] ) copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["target_spm"] ) lowerCamelCase : Optional[int] = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def _lowercase ( self , **UpperCamelCase__ ) -> MarianTokenizer: return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ ) -> Optional[Any]: return ( "This is a test", "This is a test", ) def _lowercase ( self ) -> Any: lowerCamelCase : Dict = "</s>" lowerCamelCase : str = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ ) def _lowercase ( self ) -> List[str]: lowerCamelCase : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "</s>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "<pad>" ) self.assertEqual(len(UpperCamelCase__ ) , 9 ) def _lowercase ( self ) -> List[Any]: self.assertEqual(self.get_tokenizer().vocab_size , 9 ) def _lowercase ( self ) -> str: lowerCamelCase : Any = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' ) lowerCamelCase : Union[str, Any] = en_de_tokenizer(["I am a small frog"] , return_tensors=UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = [38, 121, 14, 697, 3_8848, 0] self.assertListEqual(UpperCamelCase__ , batch.input_ids[0] ) lowerCamelCase : Union[str, Any] = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(UpperCamelCase__ ) lowerCamelCase : Tuple = [x.name for x in Path(UpperCamelCase__ ).glob("*" )] self.assertIn("source.spm" , UpperCamelCase__ ) MarianTokenizer.from_pretrained(UpperCamelCase__ ) def _lowercase ( self ) -> Any: lowerCamelCase : Dict = self.get_tokenizer() lowerCamelCase : Optional[Any] = tok( ["I am a small frog" * 1000, "I am a small frog"] , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(batch.input_ids.shape , (2, 512) ) def _lowercase ( self ) -> List[Any]: lowerCamelCase : List[Any] = self.get_tokenizer() lowerCamelCase : Any = tok(["I am a tiny frog", "I am a small frog"] , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(batch_smaller.input_ids.shape , (2, 10) ) @slow def _lowercase ( self ) -> List[str]: # fmt: off lowerCamelCase : Dict = {"input_ids": [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase__ , model_name="Helsinki-NLP/opus-mt-en-de" , revision="1a8c2263da11e68e50938f97e10cd57820bd504c" , decode_kwargs={"use_source_tokenizer": True} , ) def _lowercase ( self ) -> int: lowerCamelCase : Optional[int] = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs" ) lowerCamelCase : int = "Tämä on testi" lowerCamelCase : List[str] = "This is a test" lowerCamelCase : Optional[int] = [76, 7, 2047, 2] lowerCamelCase : List[str] = [69, 12, 11, 940, 2] lowerCamelCase : Optional[int] = tokenizer(UpperCamelCase__ ).input_ids self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase : int = tokenizer(text_target=UpperCamelCase__ ).input_ids self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase : List[str] = tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
48
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> int: return x if y == 0 else greatest_common_divisor(_SCREAMING_SNAKE_CASE ,x % y ) def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> int: return (x * y) // greatest_common_divisor(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) def A ( _SCREAMING_SNAKE_CASE = 20 ) -> int: lowerCamelCase : List[Any] = 1 for i in range(1 ,n + 1 ): lowerCamelCase : List[str] = lcm(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) return g if __name__ == "__main__": print(f'''{solution() = }''')
48
1
import numpy as np def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> np.ndarray: return np.where(vector > 0 ,_SCREAMING_SNAKE_CASE ,(alpha * (np.exp(_SCREAMING_SNAKE_CASE ) - 1)) ) if __name__ == "__main__": import doctest doctest.testmod()
48
import os import tempfile import unittest from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter from transformers.testing_utils import slow from transformers.utils import cached_property @unittest.skipUnless(os.path.exists(lowerCAmelCase__ ) , """Tatoeba directory does not exist.""" ) class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @cached_property def _lowercase ( self ) -> int: lowerCamelCase : str = tempfile.mkdtemp() return TatoebaConverter(save_dir=UpperCamelCase__ ) @slow def _lowercase ( self ) -> List[Any]: self.resolver.convert_models(["heb-eng"] ) @slow def _lowercase ( self ) -> Tuple: lowerCamelCase , lowerCamelCase : Dict = self.resolver.write_model_card("opus-mt-he-en" , dry_run=UpperCamelCase__ ) assert mmeta["long_pair"] == "heb-eng"
48
1
from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class UpperCamelCase__ : '''simple docstring''' def __init__( self , UpperCamelCase__ , ) -> Optional[int]: lowerCamelCase : int = parent lowerCamelCase : int = 13 lowerCamelCase : str = 7 lowerCamelCase : Any = True lowerCamelCase : Optional[int] = True lowerCamelCase : Dict = True lowerCamelCase : List[Any] = 99 lowerCamelCase : List[Any] = 32 lowerCamelCase : str = 2 lowerCamelCase : Union[str, Any] = 4 lowerCamelCase : str = 37 lowerCamelCase : Any = "gelu" lowerCamelCase : Optional[Any] = 0.1 lowerCamelCase : Dict = 0.1 lowerCamelCase : Optional[Any] = 512 lowerCamelCase : Optional[Any] = 16 lowerCamelCase : List[Any] = 2 lowerCamelCase : int = 0.02 lowerCamelCase : Tuple = 3 lowerCamelCase : Optional[int] = 4 lowerCamelCase : Any = None def _lowercase ( self ) -> List[str]: lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase : Union[str, Any] = None if self.use_input_mask: lowerCamelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase : Union[str, Any] = None lowerCamelCase : Tuple = None lowerCamelCase : Tuple = None if self.use_labels: lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase : Union[str, Any] = EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowercase ( self ) -> Dict: ( ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ) : Dict = self.prepare_config_and_inputs() lowerCamelCase : Union[str, Any] = True lowerCamelCase : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]: lowerCamelCase : List[Any] = TFEsmModel(config=UpperCamelCase__ ) lowerCamelCase : int = {"input_ids": input_ids, "attention_mask": input_mask} lowerCamelCase : List[Any] = model(UpperCamelCase__ ) lowerCamelCase : int = [input_ids, input_mask] lowerCamelCase : Optional[Any] = model(UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> str: lowerCamelCase : Optional[int] = True lowerCamelCase : Union[str, Any] = TFEsmModel(config=UpperCamelCase__ ) lowerCamelCase : str = { "input_ids": input_ids, "attention_mask": input_mask, "encoder_hidden_states": encoder_hidden_states, "encoder_attention_mask": encoder_attention_mask, } lowerCamelCase : List[str] = model(UpperCamelCase__ ) lowerCamelCase : Tuple = [input_ids, input_mask] lowerCamelCase : Dict = model(UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ ) # Also check the case where encoder outputs are not passed lowerCamelCase : Any = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int: lowerCamelCase : List[str] = TFEsmForMaskedLM(config=UpperCamelCase__ ) lowerCamelCase : Any = model([input_ids, input_mask] ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]: lowerCamelCase : List[Any] = self.num_labels lowerCamelCase : Dict = TFEsmForTokenClassification(config=UpperCamelCase__ ) lowerCamelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask} lowerCamelCase : Dict = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowercase ( self ) -> Optional[Any]: lowerCamelCase : Tuple = self.prepare_config_and_inputs() ( ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ) : Optional[Any] = config_and_inputs lowerCamelCase : Any = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class UpperCamelCase__ (lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase_ : Dict = ( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) lowerCamelCase_ : Optional[Any] = ( { """feature-extraction""": TFEsmModel, """fill-mask""": TFEsmForMaskedLM, """text-classification""": TFEsmForSequenceClassification, """token-classification""": TFEsmForTokenClassification, """zero-shot""": TFEsmForSequenceClassification, } if is_tf_available() else {} ) lowerCamelCase_ : Any = False lowerCamelCase_ : Dict = False def _lowercase ( self ) -> Any: lowerCamelCase : Tuple = TFEsmModelTester(self ) lowerCamelCase : int = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 ) def _lowercase ( self ) -> List[str]: self.config_tester.run_common_tests() def _lowercase ( self ) -> str: lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def _lowercase ( self ) -> List[str]: lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*UpperCamelCase__ ) def _lowercase ( self ) -> Union[str, Any]: lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ ) def _lowercase ( self ) -> List[Any]: lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ ) @slow def _lowercase ( self ) -> List[Any]: for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : Any = TFEsmModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) @unittest.skip("Protein models do not support embedding resizing." ) def _lowercase ( self ) -> List[str]: pass @unittest.skip("Protein models do not support embedding resizing." ) def _lowercase ( self ) -> Optional[Any]: pass def _lowercase ( self ) -> Tuple: lowerCamelCase , lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : Optional[Any] = model_class(UpperCamelCase__ ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer lowerCamelCase : Any = model.get_bias() assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) for k, v in name.items(): assert isinstance(UpperCamelCase__ , tf.Variable ) else: lowerCamelCase : str = model.get_output_embeddings() assert x is None lowerCamelCase : Optional[Any] = model.get_bias() assert name is None @require_tf class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @slow def _lowercase ( self ) -> Dict: lowerCamelCase : int = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" ) lowerCamelCase : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] ) lowerCamelCase : Optional[int] = model(UpperCamelCase__ )[0] lowerCamelCase : Union[str, Any] = [1, 6, 33] self.assertEqual(list(output.numpy().shape ) , UpperCamelCase__ ) # compare the actual values for a slice. lowerCamelCase : List[str] = tf.constant( [ [ [8.921518, -10.589814, -6.4671307], [-6.3967156, -13.911377, -1.1211915], [-7.781247, -13.951557, -3.740592], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) ) @slow def _lowercase ( self ) -> str: lowerCamelCase : Dict = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" ) lowerCamelCase : int = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) lowerCamelCase : Dict = model(UpperCamelCase__ )[0] # compare the actual values for a slice. lowerCamelCase : int = tf.constant( [ [ [0.14443092, 0.54125327, 0.3247739], [0.30340484, 0.00526676, 0.31077722], [0.32278043, -0.24987096, 0.3414628], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
48
import argparse from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta from transformers.utils import logging logging.set_verbosity_info() def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Dict: # Initialise PyTorch model lowerCamelCase : Any = TaConfig.from_json_file(_SCREAMING_SNAKE_CASE ) print(f'''Building PyTorch model from configuration: {config}''' ) lowerCamelCase : str = TaForConditionalGeneration(_SCREAMING_SNAKE_CASE ) # Load weights from tf checkpoint load_tf_weights_in_ta(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # Save pytorch-model print(f'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) SCREAMING_SNAKE_CASE__ : str = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
48
1
from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
48
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, is_vision_available, ) SCREAMING_SNAKE_CASE__ : List[Any] = {'processing_layoutxlm': ['LayoutXLMProcessor']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Optional[Any] = ['LayoutXLMTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Dict = ['LayoutXLMTokenizerFast'] if TYPE_CHECKING: from .processing_layoutxlm import LayoutXLMProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm import LayoutXLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast else: import sys SCREAMING_SNAKE_CASE__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
48
1
from bisect import bisect from itertools import accumulate def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> int: lowerCamelCase : int = sorted(zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) ,key=lambda _SCREAMING_SNAKE_CASE : x[0] / x[1] ,reverse=_SCREAMING_SNAKE_CASE ) lowerCamelCase , lowerCamelCase : Dict = [i[0] for i in r], [i[1] for i in r] lowerCamelCase : Optional[int] = list(accumulate(_SCREAMING_SNAKE_CASE ) ) lowerCamelCase : List[str] = bisect(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) return ( 0 if k == 0 else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k] ) ) if __name__ == "__main__": import doctest doctest.testmod()
48
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> list: lowerCamelCase : Dict = len(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Union[str, Any] = [] for i in range(len(_SCREAMING_SNAKE_CASE ) - pat_len + 1 ): lowerCamelCase : Dict = True for j in range(_SCREAMING_SNAKE_CASE ): if s[i + j] != pattern[j]: lowerCamelCase : Optional[int] = False break if match_found: position.append(_SCREAMING_SNAKE_CASE ) return position if __name__ == "__main__": assert naive_pattern_search('ABCDEFG', 'DE') == [3] print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
48
1
import argparse import torch from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml parser.add_argument( '--original_config_file', default=None, type=str, help='The YAML config file corresponding to the original architecture.', ) parser.add_argument( '--num_in_channels', default=None, type=int, help='The number of input channels. If `None` number of input channels will be automatically inferred.', ) parser.add_argument( '--scheduler_type', default='pndm', type=str, help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']', ) parser.add_argument( '--pipeline_type', default=None, type=str, help=( 'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\'' '. If `None` pipeline will be automatically inferred.' ), ) parser.add_argument( '--image_size', default=None, type=int, help=( 'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2' ' Base. Use 768 for Stable Diffusion v2.' ), ) parser.add_argument( '--prediction_type', default=None, type=str, help=( 'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable' ' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.' ), ) parser.add_argument( '--extract_ema', action='store_true', help=( 'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights' ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield' ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.' ), ) parser.add_argument( '--upcast_attention', action='store_true', help=( 'Whether the attention computation should always be upcasted. This is necessary when running stable' ' diffusion 2.1.' ), ) parser.add_argument( '--from_safetensors', action='store_true', help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.', ) parser.add_argument( '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)') parser.add_argument( '--stable_unclip', type=str, default=None, required=False, help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.', ) parser.add_argument( '--stable_unclip_prior', type=str, default=None, required=False, help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.', ) parser.add_argument( '--clip_stats_path', type=str, help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.', required=False, ) parser.add_argument( '--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.' ) parser.add_argument('--half', action='store_true', help='Save weights in half precision.') parser.add_argument( '--vae_path', type=str, default=None, required=False, help='Set to a path, hub id to an already converted vae to not convert it again.', ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args() SCREAMING_SNAKE_CASE__ : Any = download_from_original_stable_diffusion_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, prediction_type=args.prediction_type, model_type=args.pipeline_type, extract_ema=args.extract_ema, scheduler_type=args.scheduler_type, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, stable_unclip=args.stable_unclip, stable_unclip_prior=args.stable_unclip_prior, clip_stats_path=args.clip_stats_path, controlnet=args.controlnet, vae_path=args.vae_path, ) if args.half: pipe.to(torch_dtype=torch.floataa) if args.controlnet: # only save the controlnet model pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) else: pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
48
from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE__ : Optional[Any] = {'configuration_mmbt': ['MMBTConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : List[Any] = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings'] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys SCREAMING_SNAKE_CASE__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
48
1
import numpy as np import datasets SCREAMING_SNAKE_CASE__ : List[str] = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n' SCREAMING_SNAKE_CASE__ : Union[str, Any] = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n' SCREAMING_SNAKE_CASE__ : int = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase__ (datasets.Metric ): '''simple docstring''' def _lowercase ( self ) -> List[str]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ), } ) , ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]: # convert to numpy arrays lowerCamelCase : Tuple = np.array(UpperCamelCase__ ) lowerCamelCase : Tuple = np.array(UpperCamelCase__ ) # Assert that arrays are 2D if len(X.shape ) != 2: raise ValueError("Expected `X` to be a 2D vector" ) if len(reference_distribution.shape ) != 2: raise ValueError("Expected `reference_distribution` to be a 2D vector" ) if reference_distribution.shape[0] < 2: raise ValueError( "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" ) # Get mahalanobis distance for each prediction lowerCamelCase : List[Any] = X - np.mean(UpperCamelCase__ ) lowerCamelCase : Dict = np.cov(reference_distribution.T ) try: lowerCamelCase : str = np.linalg.inv(UpperCamelCase__ ) except np.linalg.LinAlgError: lowerCamelCase : Any = np.linalg.pinv(UpperCamelCase__ ) lowerCamelCase : Optional[int] = np.dot(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase : int = np.dot(UpperCamelCase__ , X_minus_mu.T ).diagonal() return {"mahalanobis": mahal_dist}
48
import numpy as np from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor def A ( _SCREAMING_SNAKE_CASE ) -> tuple: return (data["data"], data["target"]) def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> np.ndarray: lowerCamelCase : List[str] = XGBRegressor(verbosity=0 ,random_state=42 ) xgb.fit(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # Predict target for test data lowerCamelCase : List[Any] = xgb.predict(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Tuple = predictions.reshape(len(_SCREAMING_SNAKE_CASE ) ,1 ) return predictions def A ( ) -> None: lowerCamelCase : Dict = fetch_california_housing() lowerCamelCase , lowerCamelCase : Tuple = data_handling(_SCREAMING_SNAKE_CASE ) lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[Any] = train_test_split( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,test_size=0.25 ,random_state=1 ) lowerCamelCase : Any = xgboost(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # Error printing print(f'''Mean Absolute Error : {mean_absolute_error(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )}''' ) print(f'''Mean Square Error : {mean_squared_error(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )}''' ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
48
1
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaInpaintPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UpperCamelCase__ (lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase_ : int = KandinskyVaaInpaintPipeline lowerCamelCase_ : int = ["""image_embeds""", """negative_image_embeds""", """image""", """mask_image"""] lowerCamelCase_ : Dict = [ """image_embeds""", """negative_image_embeds""", """image""", """mask_image""", ] lowerCamelCase_ : Any = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] lowerCamelCase_ : Tuple = False @property def _lowercase ( self ) -> List[Any]: return 32 @property def _lowercase ( self ) -> Dict: return 32 @property def _lowercase ( self ) -> List[Any]: return self.time_input_dim @property def _lowercase ( self ) -> int: return self.time_input_dim * 4 @property def _lowercase ( self ) -> int: return 100 @property def _lowercase ( self ) -> Tuple: torch.manual_seed(0 ) lowerCamelCase : str = { "in_channels": 9, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } lowerCamelCase : Any = UNetaDConditionModel(**UpperCamelCase__ ) return model @property def _lowercase ( self ) -> Dict: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def _lowercase ( self ) -> Dict: torch.manual_seed(0 ) lowerCamelCase : int = VQModel(**self.dummy_movq_kwargs ) return model def _lowercase ( self ) -> Optional[int]: lowerCamelCase : List[str] = self.dummy_unet lowerCamelCase : int = self.dummy_movq lowerCamelCase : Optional[int] = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.00085 , beta_end=0.012 , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , steps_offset=1 , prediction_type="epsilon" , thresholding=UpperCamelCase__ , ) lowerCamelCase : Optional[Any] = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__=0 ) -> Any: lowerCamelCase : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) lowerCamelCase : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( UpperCamelCase__ ) # create init_image lowerCamelCase : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase : List[str] = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert("RGB" ).resize((256, 256) ) # create mask lowerCamelCase : Dict = np.ones((64, 64) , dtype=np.floataa ) lowerCamelCase : Dict = 0 if str(UpperCamelCase__ ).startswith("mps" ): lowerCamelCase : Optional[Any] = torch.manual_seed(UpperCamelCase__ ) else: lowerCamelCase : str = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) lowerCamelCase : Optional[Any] = { "image": init_image, "mask_image": mask, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 2, "guidance_scale": 4.0, "output_type": "np", } return inputs def _lowercase ( self ) -> Optional[Any]: lowerCamelCase : Optional[Any] = "cpu" lowerCamelCase : Optional[Any] = self.get_dummy_components() lowerCamelCase : Optional[Any] = self.pipeline_class(**UpperCamelCase__ ) lowerCamelCase : str = pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowerCamelCase : Any = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) ) lowerCamelCase : Dict = output.images lowerCamelCase : Union[str, Any] = pipe( **self.get_dummy_inputs(UpperCamelCase__ ) , return_dict=UpperCamelCase__ , )[0] lowerCamelCase : int = image[0, -3:, -3:, -1] lowerCamelCase : Any = image_from_tuple[0, -3:, -3:, -1] print(F'''image.shape {image.shape}''' ) assert image.shape == (1, 64, 64, 3) lowerCamelCase : Optional[int] = np.array( [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' def _lowercase ( self ) -> Union[str, Any]: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def _lowercase ( self ) -> Optional[int]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase ( self ) -> Dict: lowerCamelCase : Optional[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy" ) lowerCamelCase : List[str] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) lowerCamelCase : List[Any] = np.ones((768, 768) , dtype=np.floataa ) lowerCamelCase : Any = 0 lowerCamelCase : Optional[int] = "a hat" lowerCamelCase : Tuple = KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa ) pipe_prior.to(UpperCamelCase__ ) lowerCamelCase : int = KandinskyVaaInpaintPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder-inpaint" , torch_dtype=torch.floataa ) lowerCamelCase : Any = pipeline.to(UpperCamelCase__ ) pipeline.set_progress_bar_config(disable=UpperCamelCase__ ) lowerCamelCase : str = torch.Generator(device="cpu" ).manual_seed(0 ) lowerCamelCase , lowerCamelCase : Tuple = pipe_prior( UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=5 , negative_prompt="" , ).to_tuple() lowerCamelCase : Any = pipeline( image=UpperCamelCase__ , mask_image=UpperCamelCase__ , image_embeds=UpperCamelCase__ , negative_image_embeds=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=100 , height=768 , width=768 , output_type="np" , ) lowerCamelCase : str = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
48
from math import sqrt def A ( _SCREAMING_SNAKE_CASE = 100_0000 ) -> int: lowerCamelCase : int = 0 lowerCamelCase : int = 0 lowerCamelCase : int while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 ,2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(_SCREAMING_SNAKE_CASE ,sum_shortest_sides // 2 ) - max(1 ,sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(f'''{solution() = }''')
48
1
from __future__ import annotations import math def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> int: if depth < 0: raise ValueError("Depth cannot be less than 0" ) if not scores: raise ValueError("Scores cannot be empty" ) if depth == height: return scores[node_index] return ( max( minimax(depth + 1 ,node_index * 2 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) ,minimax(depth + 1 ,node_index * 2 + 1 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) ,) if is_max else min( minimax(depth + 1 ,node_index * 2 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) ,minimax(depth + 1 ,node_index * 2 + 1 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) ,) ) def A ( ) -> None: lowerCamelCase : Any = [90, 23, 6, 33, 21, 65, 123, 3_4423] lowerCamelCase : Tuple = math.log(len(_SCREAMING_SNAKE_CASE ) ,2 ) print(f'''Optimal value : {minimax(0 ,0 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )}''' ) if __name__ == "__main__": import doctest doctest.testmod() main()
48
import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors SCREAMING_SNAKE_CASE__ : Dict = logging.getLogger(__name__) class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Optional[int] = """sequence-classification""" def __init__( self , UpperCamelCase__ ) -> List[Any]: if type(UpperCamelCase__ ) == dict: lowerCamelCase : int = Namespace(**UpperCamelCase__ ) lowerCamelCase : str = glue_output_modes[hparams.task] lowerCamelCase : int = glue_tasks_num_labels[hparams.task] super().__init__(UpperCamelCase__ , UpperCamelCase__ , self.mode ) def _lowercase ( self , **UpperCamelCase__ ) -> Tuple: return self.model(**UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple: lowerCamelCase : Union[str, Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: lowerCamelCase : List[str] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None lowerCamelCase : Optional[int] = self(**UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = outputs[0] lowerCamelCase : str = self.trainer.lr_schedulers[0]["scheduler"] lowerCamelCase : Optional[int] = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def _lowercase ( self ) -> str: lowerCamelCase : Any = self.hparams lowerCamelCase : Union[str, Any] = processors[args.task]() lowerCamelCase : Optional[int] = processor.get_labels() for mode in ["train", "dev"]: lowerCamelCase : Optional[Any] = self._feature_file(UpperCamelCase__ ) if os.path.exists(UpperCamelCase__ ) and not args.overwrite_cache: logger.info("Loading features from cached file %s" , UpperCamelCase__ ) else: logger.info("Creating features from dataset file at %s" , args.data_dir ) lowerCamelCase : List[str] = ( processor.get_dev_examples(args.data_dir ) if mode == "dev" else processor.get_train_examples(args.data_dir ) ) lowerCamelCase : Dict = convert_examples_to_features( UpperCamelCase__ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info("Saving features into cached file %s" , UpperCamelCase__ ) torch.save(UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False ) -> DataLoader: lowerCamelCase : str = "dev" if mode == "test" else mode lowerCamelCase : int = self._feature_file(UpperCamelCase__ ) logger.info("Loading features from cached file %s" , UpperCamelCase__ ) lowerCamelCase : str = torch.load(UpperCamelCase__ ) lowerCamelCase : List[str] = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) lowerCamelCase : str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) lowerCamelCase : List[str] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) if self.hparams.glue_output_mode == "classification": lowerCamelCase : Any = torch.tensor([f.label for f in features] , dtype=torch.long ) elif self.hparams.glue_output_mode == "regression": lowerCamelCase : Union[str, Any] = torch.tensor([f.label for f in features] , dtype=torch.float ) return DataLoader( TensorDataset(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , batch_size=UpperCamelCase__ , shuffle=UpperCamelCase__ , ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]: lowerCamelCase : Dict = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: lowerCamelCase : Tuple = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None lowerCamelCase : Dict = self(**UpperCamelCase__ ) lowerCamelCase , lowerCamelCase : Any = outputs[:2] lowerCamelCase : Union[str, Any] = logits.detach().cpu().numpy() lowerCamelCase : Optional[Any] = inputs["labels"].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def _lowercase ( self , UpperCamelCase__ ) -> tuple: lowerCamelCase : Union[str, Any] = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item() lowerCamelCase : Optional[int] = np.concatenate([x["pred"] for x in outputs] , axis=0 ) if self.hparams.glue_output_mode == "classification": lowerCamelCase : Union[str, Any] = np.argmax(UpperCamelCase__ , axis=1 ) elif self.hparams.glue_output_mode == "regression": lowerCamelCase : str = np.squeeze(UpperCamelCase__ ) lowerCamelCase : List[Any] = np.concatenate([x["target"] for x in outputs] , axis=0 ) lowerCamelCase : List[str] = [[] for _ in range(out_label_ids.shape[0] )] lowerCamelCase : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )] lowerCamelCase : Dict = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , UpperCamelCase__ , UpperCamelCase__ )} lowerCamelCase : List[str] = dict(results.items() ) lowerCamelCase : Optional[int] = results return ret, preds_list, out_label_list def _lowercase ( self , UpperCamelCase__ ) -> dict: lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = self._eval_end(UpperCamelCase__ ) lowerCamelCase : str = ret["log"] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def _lowercase ( self , UpperCamelCase__ ) -> dict: lowerCamelCase , lowerCamelCase , lowerCamelCase : str = self._eval_end(UpperCamelCase__ ) lowerCamelCase : str = ret["log"] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def _lowercase ( UpperCamelCase__ , UpperCamelCase__ ) -> int: BaseTransformer.add_model_specific_args(UpperCamelCase__ , UpperCamelCase__ ) parser.add_argument( "--max_seq_length" , default=128 , type=UpperCamelCase__ , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) , ) parser.add_argument( "--task" , default="" , type=UpperCamelCase__ , required=UpperCamelCase__ , help="The GLUE task to run" , ) parser.add_argument( "--gpus" , default=0 , type=UpperCamelCase__ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , ) parser.add_argument( "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" ) return parser def A ( ) -> int: lowerCamelCase : int = argparse.ArgumentParser() add_generic_args(_SCREAMING_SNAKE_CASE ,os.getcwd() ) lowerCamelCase : str = GLUETransformer.add_model_specific_args(_SCREAMING_SNAKE_CASE ,os.getcwd() ) lowerCamelCase : str = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: lowerCamelCase : int = os.path.join( "./results" ,f'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' ,) os.makedirs(args.output_dir ) lowerCamelCase : int = GLUETransformer(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Dict = generic_train(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # Optionally, predict on dev set and write to output_dir if args.do_predict: lowerCamelCase : Optional[int] = sorted(glob.glob(os.path.join(args.output_dir ,"checkpoint-epoch=*.ckpt" ) ,recursive=_SCREAMING_SNAKE_CASE ) ) lowerCamelCase : Tuple = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
48
1
import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def A ( _SCREAMING_SNAKE_CASE ) -> List[Any]: if isinstance(_SCREAMING_SNAKE_CASE ,collections.abc.Iterable ): return x return (x, x) @require_flax class UpperCamelCase__ : '''simple docstring''' def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]: pass def _lowercase ( self ) -> Any: pass def _lowercase ( self ) -> Optional[Any]: pass def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int: lowerCamelCase : Union[str, Any] = np.abs((a - b) ).max() self.assertLessEqual(UpperCamelCase__ , UpperCamelCase__ , F'''Difference between torch and flax is {diff} (>= {tol}).''' ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , **UpperCamelCase__ ) -> int: lowerCamelCase : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase : int = FlaxVisionTextDualEncoderModel(UpperCamelCase__ ) lowerCamelCase : Any = model(input_ids=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , **UpperCamelCase__ ) -> Tuple: lowerCamelCase , lowerCamelCase : Optional[int] = self.get_vision_text_model(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase : List[str] = {"vision_model": vision_model, "text_model": text_model} lowerCamelCase : Tuple = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**UpperCamelCase__ ) lowerCamelCase : Any = model(input_ids=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , **UpperCamelCase__ ) -> Tuple: lowerCamelCase , lowerCamelCase : str = self.get_vision_text_model(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = {"vision_model": vision_model, "text_model": text_model} lowerCamelCase : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = model(input_ids=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ ) lowerCamelCase : int = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase__ ) lowerCamelCase : List[str] = FlaxVisionTextDualEncoderModel.from_pretrained(UpperCamelCase__ ) lowerCamelCase : int = model(input_ids=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ ) lowerCamelCase : int = after_output[0] lowerCamelCase : Optional[Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(UpperCamelCase__ , 1e-3 ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , **UpperCamelCase__ ) -> Union[str, Any]: lowerCamelCase , lowerCamelCase : Dict = self.get_vision_text_model(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase : Dict = {"vision_model": vision_model, "text_model": text_model} lowerCamelCase : Tuple = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = model( input_ids=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , output_attentions=UpperCamelCase__ ) lowerCamelCase : Tuple = output.vision_model_output.attentions self.assertEqual(len(UpperCamelCase__ ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) lowerCamelCase : Tuple = to_atuple(vision_model.config.image_size ) lowerCamelCase : str = to_atuple(vision_model.config.patch_size ) lowerCamelCase : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) lowerCamelCase : Union[str, Any] = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) lowerCamelCase : Optional[Any] = output.text_model_output.attentions self.assertEqual(len(UpperCamelCase__ ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple: pt_model.to(UpperCamelCase__ ) pt_model.eval() # prepare inputs lowerCamelCase : int = inputs_dict lowerCamelCase : Any = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): lowerCamelCase : int = pt_model(**UpperCamelCase__ ).to_tuple() lowerCamelCase : Any = fx_model(**UpperCamelCase__ ).to_tuple() self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) , "Output lengths differ between Flax and PyTorch" ) for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ): self.assert_almost_equals(UpperCamelCase__ , pt_output.numpy() , 4e-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(UpperCamelCase__ ) lowerCamelCase : List[str] = FlaxVisionTextDualEncoderModel.from_pretrained(UpperCamelCase__ , from_pt=UpperCamelCase__ ) lowerCamelCase : Tuple = fx_model_loaded(**UpperCamelCase__ ).to_tuple() self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) , "Output lengths differ between Flax and PyTorch" ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ): self.assert_almost_equals(UpperCamelCase__ , pt_output.numpy() , 4e-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(UpperCamelCase__ ) lowerCamelCase : Any = VisionTextDualEncoderModel.from_pretrained(UpperCamelCase__ , from_flax=UpperCamelCase__ ) pt_model_loaded.to(UpperCamelCase__ ) pt_model_loaded.eval() with torch.no_grad(): lowerCamelCase : List[str] = pt_model_loaded(**UpperCamelCase__ ).to_tuple() self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) , "Output lengths differ between Flax and PyTorch" ) for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ): self.assert_almost_equals(UpperCamelCase__ , pt_output_loaded.numpy() , 4e-2 ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]: lowerCamelCase : Optional[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase : str = VisionTextDualEncoderModel(UpperCamelCase__ ) lowerCamelCase : List[str] = FlaxVisionTextDualEncoderModel(UpperCamelCase__ ) lowerCamelCase : Optional[int] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCamelCase__ ) lowerCamelCase : List[Any] = fx_state self.check_pt_flax_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]: lowerCamelCase : Any = VisionTextDualEncoderConfig.from_vision_text_configs(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase : int = VisionTextDualEncoderModel(UpperCamelCase__ ) lowerCamelCase : Optional[int] = FlaxVisionTextDualEncoderModel(UpperCamelCase__ ) lowerCamelCase : Any = load_flax_weights_in_pytorch_model(UpperCamelCase__ , fx_model.params ) self.check_pt_flax_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self ) -> Optional[Any]: lowerCamelCase : Optional[Any] = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**UpperCamelCase__ ) def _lowercase ( self ) -> int: lowerCamelCase : Any = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**UpperCamelCase__ ) def _lowercase ( self ) -> List[Any]: lowerCamelCase : List[str] = self.prepare_config_and_inputs() self.check_save_load(**UpperCamelCase__ ) def _lowercase ( self ) -> Dict: lowerCamelCase : Any = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**UpperCamelCase__ ) @is_pt_flax_cross_test def _lowercase ( self ) -> Optional[Any]: lowerCamelCase : int = self.prepare_config_and_inputs() lowerCamelCase : Union[str, Any] = config_inputs_dict.pop("vision_config" ) lowerCamelCase : Tuple = config_inputs_dict.pop("text_config" ) lowerCamelCase : Dict = config_inputs_dict self.check_equivalence_pt_to_flax(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) self.check_equivalence_flax_to_pt(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) @slow def _lowercase ( self ) -> Optional[int]: lowerCamelCase , lowerCamelCase : List[Any] = self.get_pretrained_model_and_inputs() lowerCamelCase : Any = model_a(**UpperCamelCase__ ) lowerCamelCase : Any = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(UpperCamelCase__ ) lowerCamelCase : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(UpperCamelCase__ ) lowerCamelCase : int = model_a(**UpperCamelCase__ ) lowerCamelCase : List[str] = after_outputs[0] lowerCamelCase : Tuple = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(UpperCamelCase__ , 1e-5 ) @require_flax class UpperCamelCase__ (lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' def _lowercase ( self ) -> int: lowerCamelCase : List[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-bert" , vision_from_pt=UpperCamelCase__ , text_from_pt=UpperCamelCase__ , ) lowerCamelCase : str = 13 lowerCamelCase : List[str] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) lowerCamelCase : Dict = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) lowerCamelCase : Dict = random_attention_mask([batch_size, 4] ) lowerCamelCase : Optional[int] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]: lowerCamelCase : Dict = FlaxViTModel(UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = FlaxBertModel(UpperCamelCase__ ) return vision_model, text_model def _lowercase ( self ) -> List[Any]: lowerCamelCase : Optional[int] = FlaxViTModelTester(self ) lowerCamelCase : Any = FlaxBertModelTester(self ) lowerCamelCase : Union[str, Any] = vit_model_tester.prepare_config_and_inputs() lowerCamelCase : str = bert_model_tester.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase : Optional[int] = vision_config_and_inputs lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Any = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class UpperCamelCase__ (lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' def _lowercase ( self ) -> Tuple: lowerCamelCase : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-clip" , "hf-internal-testing/tiny-bert" , vision_from_pt=UpperCamelCase__ , text_from_pt=UpperCamelCase__ , ) lowerCamelCase : Optional[int] = 13 lowerCamelCase : List[str] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) lowerCamelCase : Tuple = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) lowerCamelCase : Union[str, Any] = random_attention_mask([batch_size, 4] ) lowerCamelCase : Optional[int] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> str: lowerCamelCase : List[Any] = FlaxCLIPVisionModel(UpperCamelCase__ ) lowerCamelCase : Dict = FlaxBertModel(UpperCamelCase__ ) return vision_model, text_model def _lowercase ( self ) -> Dict: lowerCamelCase : Optional[Any] = FlaxCLIPVisionModelTester(self ) lowerCamelCase : Optional[int] = FlaxBertModelTester(self ) lowerCamelCase : List[Any] = clip_model_tester.prepare_config_and_inputs() lowerCamelCase : int = bert_model_tester.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase : Any = vision_config_and_inputs lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : int = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' @slow def _lowercase ( self ) -> Union[str, Any]: lowerCamelCase : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian" , logit_scale_init_value=1.0 ) lowerCamelCase : Optional[int] = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" ) lowerCamelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) lowerCamelCase : Optional[int] = processor( text=["una foto di un gatto", "una foto di un cane"] , images=UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors="np" ) lowerCamelCase : Dict = model(**UpperCamelCase__ ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) lowerCamelCase : Dict = np.array([[1.2284727, 0.3104122]] ) self.assertTrue(np.allclose(outputs.logits_per_image , UpperCamelCase__ , atol=1e-3 ) )
48
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Any: # "extended trapezoidal rule" # int(f) = dx/2 * (f1 + 2f2 + ... + fn) lowerCamelCase : str = (boundary[1] - boundary[0]) / steps lowerCamelCase : List[str] = boundary[0] lowerCamelCase : Union[str, Any] = boundary[1] lowerCamelCase : int = make_points(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) lowerCamelCase : List[str] = 0.0 y += (h / 2.0) * f(_SCREAMING_SNAKE_CASE ) for i in x_i: # print(i) y += h * f(_SCREAMING_SNAKE_CASE ) y += (h / 2.0) * f(_SCREAMING_SNAKE_CASE ) return y def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> int: lowerCamelCase : int = a + h while x < (b - h): yield x lowerCamelCase : List[str] = x + h def A ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]: # enter your function here lowerCamelCase : str = (x - 0) * (x - 0) return y def A ( ) -> int: lowerCamelCase : int = 0.0 # Lower bound of integration lowerCamelCase : int = 1.0 # Upper bound of integration lowerCamelCase : Dict = 10.0 # define number of steps or resolution lowerCamelCase : int = [a, b] # define boundary of integration lowerCamelCase : str = method_a(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) print(f'''y = {y}''' ) if __name__ == "__main__": main()
48
1
from transformers import BertTokenizerFast from .custom_tokenization import CustomTokenizer class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : List[Any] = CustomTokenizer pass
48
def A ( _SCREAMING_SNAKE_CASE = 100_0000 ) -> int: lowerCamelCase : Tuple = 1 lowerCamelCase : int = 1 lowerCamelCase : Optional[Any] = {1: 1} for inputa in range(2 ,_SCREAMING_SNAKE_CASE ): lowerCamelCase : Union[str, Any] = 0 lowerCamelCase : List[str] = inputa while True: if number in counters: counter += counters[number] break if number % 2 == 0: number //= 2 counter += 1 else: lowerCamelCase : str = (3 * number) + 1 counter += 1 if inputa not in counters: lowerCamelCase : str = counter if counter > pre_counter: lowerCamelCase : str = inputa lowerCamelCase : Any = counter return largest_number if __name__ == "__main__": print(solution(int(input().strip())))
48
1
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Any: # "extended trapezoidal rule" # int(f) = dx/2 * (f1 + 2f2 + ... + fn) lowerCamelCase : str = (boundary[1] - boundary[0]) / steps lowerCamelCase : List[str] = boundary[0] lowerCamelCase : Union[str, Any] = boundary[1] lowerCamelCase : int = make_points(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) lowerCamelCase : List[str] = 0.0 y += (h / 2.0) * f(_SCREAMING_SNAKE_CASE ) for i in x_i: # print(i) y += h * f(_SCREAMING_SNAKE_CASE ) y += (h / 2.0) * f(_SCREAMING_SNAKE_CASE ) return y def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> int: lowerCamelCase : int = a + h while x < (b - h): yield x lowerCamelCase : List[str] = x + h def A ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]: # enter your function here lowerCamelCase : str = (x - 0) * (x - 0) return y def A ( ) -> int: lowerCamelCase : int = 0.0 # Lower bound of integration lowerCamelCase : int = 1.0 # Upper bound of integration lowerCamelCase : Dict = 10.0 # define number of steps or resolution lowerCamelCase : int = [a, b] # define boundary of integration lowerCamelCase : str = method_a(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) print(f'''y = {y}''' ) if __name__ == "__main__": main()
48
import argparse import os import re SCREAMING_SNAKE_CASE__ : List[Any] = 'src/transformers/models/auto' # re pattern that matches mapping introductions: # SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict SCREAMING_SNAKE_CASE__ : Optional[int] = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict') # re pattern that matches identifiers in mappings SCREAMING_SNAKE_CASE__ : Tuple = re.compile(r'\s*\(\s*"(\S[^"]+)"') def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = False ) -> int: with open(_SCREAMING_SNAKE_CASE ,"r" ,encoding="utf-8" ) as f: lowerCamelCase : List[Any] = f.read() lowerCamelCase : str = content.split("\n" ) lowerCamelCase : int = [] lowerCamelCase : List[Any] = 0 while line_idx < len(_SCREAMING_SNAKE_CASE ): if _re_intro_mapping.search(lines[line_idx] ) is not None: lowerCamelCase : Optional[int] = len(re.search(r"^(\s*)\S" ,lines[line_idx] ).groups()[0] ) + 8 # Start of a new mapping! while not lines[line_idx].startswith(" " * indent + "(" ): new_lines.append(lines[line_idx] ) line_idx += 1 lowerCamelCase : Optional[int] = [] while lines[line_idx].strip() != "]": # Blocks either fit in one line or not if lines[line_idx].strip() == "(": lowerCamelCase : List[str] = line_idx while not lines[line_idx].startswith(" " * indent + ")" ): line_idx += 1 blocks.append("\n".join(lines[start_idx : line_idx + 1] ) ) else: blocks.append(lines[line_idx] ) line_idx += 1 # Sort blocks by their identifiers lowerCamelCase : Union[str, Any] = sorted(_SCREAMING_SNAKE_CASE ,key=lambda _SCREAMING_SNAKE_CASE : _re_identifier.search(_SCREAMING_SNAKE_CASE ).groups()[0] ) new_lines += blocks else: new_lines.append(lines[line_idx] ) line_idx += 1 if overwrite: with open(_SCREAMING_SNAKE_CASE ,"w" ,encoding="utf-8" ) as f: f.write("\n".join(_SCREAMING_SNAKE_CASE ) ) elif "\n".join(_SCREAMING_SNAKE_CASE ) != content: return True def A ( _SCREAMING_SNAKE_CASE = False ) -> List[str]: lowerCamelCase : str = [os.path.join(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) for f in os.listdir(_SCREAMING_SNAKE_CASE ) if f.endswith(".py" )] lowerCamelCase : Union[str, Any] = [sort_auto_mapping(_SCREAMING_SNAKE_CASE ,overwrite=_SCREAMING_SNAKE_CASE ) for fname in fnames] if not overwrite and any(_SCREAMING_SNAKE_CASE ): lowerCamelCase : str = [f for f, d in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) if d] raise ValueError( f'''The following files have auto mappings that need sorting: {", ".join(_SCREAMING_SNAKE_CASE )}. Run `make style` to fix''' " this." ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : List[str] = argparse.ArgumentParser() parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.') SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args() sort_all_auto_mappings(not args.check_only)
48
1
import argparse from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta from transformers.utils import logging logging.set_verbosity_info() def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Dict: # Initialise PyTorch model lowerCamelCase : Any = TaConfig.from_json_file(_SCREAMING_SNAKE_CASE ) print(f'''Building PyTorch model from configuration: {config}''' ) lowerCamelCase : str = TaForConditionalGeneration(_SCREAMING_SNAKE_CASE ) # Load weights from tf checkpoint load_tf_weights_in_ta(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # Save pytorch-model print(f'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) SCREAMING_SNAKE_CASE__ : str = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
48
def A ( _SCREAMING_SNAKE_CASE ) -> list: if n_term == "": return [] lowerCamelCase : list = [] for temp in range(int(_SCREAMING_SNAKE_CASE ) ): series.append(f'''1/{temp + 1}''' if series else "1" ) return series if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Dict = input('Enter the last number (nth term) of the Harmonic Series') print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n') print(harmonic_series(nth_term))
48
1
import random from .binary_exp_mod import bin_exp_mod def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=1000 ) -> List[str]: if n < 2: return False if n % 2 == 0: return n == 2 # this means n is odd lowerCamelCase : List[Any] = n - 1 lowerCamelCase : Dict = 0 while d % 2 == 0: d /= 2 exp += 1 # n - 1=d*(2**exp) lowerCamelCase : Optional[Any] = 0 while count < prec: lowerCamelCase : str = random.randint(2 ,n - 1 ) lowerCamelCase : Dict = bin_exp_mod(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) if b != 1: lowerCamelCase : str = True for _ in range(_SCREAMING_SNAKE_CASE ): if b == n - 1: lowerCamelCase : Tuple = False break lowerCamelCase : int = b * b b %= n if flag: return False count += 1 return True if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Optional[int] = abs(int(input('Enter bound : ').strip())) print('Here\'s the list of primes:') print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
48
from __future__ import annotations import requests def A ( _SCREAMING_SNAKE_CASE ) -> dict: lowerCamelCase : Tuple = f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty''' return requests.get(_SCREAMING_SNAKE_CASE ).json() def A ( _SCREAMING_SNAKE_CASE = 10 ) -> list[dict]: lowerCamelCase : str = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty" lowerCamelCase : Any = requests.get(_SCREAMING_SNAKE_CASE ).json()[:max_stories] return [get_hackernews_story(_SCREAMING_SNAKE_CASE ) for story_id in story_ids] def A ( _SCREAMING_SNAKE_CASE = 10 ) -> str: lowerCamelCase : str = hackernews_top_stories(_SCREAMING_SNAKE_CASE ) return "\n".join("* [{title}]({url})".format(**_SCREAMING_SNAKE_CASE ) for story in stories ) if __name__ == "__main__": print(hackernews_top_stories_as_markdown())
48
1
from __future__ import annotations class UpperCamelCase__ : '''simple docstring''' def __init__( self , UpperCamelCase__ ) -> None: lowerCamelCase : List[Any] = order # a_{0} ... a_{k} lowerCamelCase : Any = [1.0] + [0.0] * order # b_{0} ... b_{k} lowerCamelCase : Union[str, Any] = [1.0] + [0.0] * order # x[n-1] ... x[n-k] lowerCamelCase : int = [0.0] * self.order # y[n-1] ... y[n-k] lowerCamelCase : Union[str, Any] = [0.0] * self.order def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> None: if len(UpperCamelCase__ ) < self.order: lowerCamelCase : Dict = [1.0, *a_coeffs] if len(UpperCamelCase__ ) != self.order + 1: lowerCamelCase : str = ( F'''Expected a_coeffs to have {self.order + 1} elements ''' F'''for {self.order}-order filter, got {len(UpperCamelCase__ )}''' ) raise ValueError(UpperCamelCase__ ) if len(UpperCamelCase__ ) != self.order + 1: lowerCamelCase : Optional[int] = ( F'''Expected b_coeffs to have {self.order + 1} elements ''' F'''for {self.order}-order filter, got {len(UpperCamelCase__ )}''' ) raise ValueError(UpperCamelCase__ ) lowerCamelCase : List[Any] = a_coeffs lowerCamelCase : str = b_coeffs def _lowercase ( self , UpperCamelCase__ ) -> float: lowerCamelCase : Optional[Any] = 0.0 # Start at index 1 and do index 0 at the end. for i in range(1 , self.order + 1 ): result += ( self.b_coeffs[i] * self.input_history[i - 1] - self.a_coeffs[i] * self.output_history[i - 1] ) lowerCamelCase : Dict = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0] lowerCamelCase : Dict = self.input_history[:-1] lowerCamelCase : int = self.output_history[:-1] lowerCamelCase : List[Any] = sample lowerCamelCase : Dict = result return result
48
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Dict = { 'salesforce/blip2-opt-2.7b': 'https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json', } class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Union[str, Any] = """blip_2_vision_model""" def __init__( self , UpperCamelCase__=1408 , UpperCamelCase__=6144 , UpperCamelCase__=39 , UpperCamelCase__=16 , UpperCamelCase__=224 , UpperCamelCase__=14 , UpperCamelCase__="gelu" , UpperCamelCase__=0.00001 , UpperCamelCase__=0.0 , UpperCamelCase__=1e-10 , UpperCamelCase__=True , **UpperCamelCase__ , ) -> Optional[Any]: super().__init__(**UpperCamelCase__ ) lowerCamelCase : Dict = hidden_size lowerCamelCase : Union[str, Any] = intermediate_size lowerCamelCase : List[str] = num_hidden_layers lowerCamelCase : List[str] = num_attention_heads lowerCamelCase : Dict = patch_size lowerCamelCase : Tuple = image_size lowerCamelCase : Dict = initializer_range lowerCamelCase : Union[str, Any] = attention_dropout lowerCamelCase : Dict = layer_norm_eps lowerCamelCase : Optional[Any] = hidden_act lowerCamelCase : str = qkv_bias @classmethod def _lowercase ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> "PretrainedConfig": cls._set_token_in_kwargs(UpperCamelCase__ ) lowerCamelCase , lowerCamelCase : List[str] = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ ) # get the vision config dict if we are loading from Blip2Config if config_dict.get("model_type" ) == "blip-2": lowerCamelCase : Optional[int] = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ ) class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Dict = """blip_2_qformer""" def __init__( self , UpperCamelCase__=3_0522 , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-12 , UpperCamelCase__=0 , UpperCamelCase__="absolute" , UpperCamelCase__=2 , UpperCamelCase__=1408 , **UpperCamelCase__ , ) -> int: super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ ) lowerCamelCase : Optional[int] = vocab_size lowerCamelCase : int = hidden_size lowerCamelCase : Dict = num_hidden_layers lowerCamelCase : Union[str, Any] = num_attention_heads lowerCamelCase : int = hidden_act lowerCamelCase : Optional[Any] = intermediate_size lowerCamelCase : Dict = hidden_dropout_prob lowerCamelCase : Dict = attention_probs_dropout_prob lowerCamelCase : Dict = max_position_embeddings lowerCamelCase : List[str] = initializer_range lowerCamelCase : List[str] = layer_norm_eps lowerCamelCase : int = position_embedding_type lowerCamelCase : Tuple = cross_attention_frequency lowerCamelCase : Optional[int] = encoder_hidden_size @classmethod def _lowercase ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> "PretrainedConfig": cls._set_token_in_kwargs(UpperCamelCase__ ) lowerCamelCase , lowerCamelCase : str = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ ) # get the qformer config dict if we are loading from Blip2Config if config_dict.get("model_type" ) == "blip-2": lowerCamelCase : int = config_dict["qformer_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ ) class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : List[str] = """blip-2""" lowerCamelCase_ : int = True def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=32 , **UpperCamelCase__ ) -> str: super().__init__(**UpperCamelCase__ ) if vision_config is None: lowerCamelCase : List[Any] = {} logger.info("vision_config is None. initializing the Blip2VisionConfig with default values." ) if qformer_config is None: lowerCamelCase : List[Any] = {} logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values." ) if text_config is None: lowerCamelCase : Any = {} logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." ) lowerCamelCase : Optional[int] = BlipaVisionConfig(**UpperCamelCase__ ) lowerCamelCase : str = BlipaQFormerConfig(**UpperCamelCase__ ) lowerCamelCase : List[str] = text_config["model_type"] if "model_type" in text_config else "opt" lowerCamelCase : str = CONFIG_MAPPING[text_model_type](**UpperCamelCase__ ) lowerCamelCase : Optional[Any] = self.text_config.tie_word_embeddings lowerCamelCase : int = self.text_config.is_encoder_decoder lowerCamelCase : Optional[Any] = num_query_tokens lowerCamelCase : int = self.vision_config.hidden_size lowerCamelCase : Tuple = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES lowerCamelCase : Dict = 1.0 lowerCamelCase : List[Any] = 0.02 @classmethod def _lowercase ( cls , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ , ) -> str: return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **UpperCamelCase__ , ) def _lowercase ( self ) -> Optional[Any]: lowerCamelCase : Tuple = copy.deepcopy(self.__dict__ ) lowerCamelCase : Tuple = self.vision_config.to_dict() lowerCamelCase : int = self.qformer_config.to_dict() lowerCamelCase : Optional[Any] = self.text_config.to_dict() lowerCamelCase : int = self.__class__.model_type return output
48
1
import warnings from ...utils import logging from .image_processing_poolformer import PoolFormerImageProcessor SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__) class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> None: warnings.warn( "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use PoolFormerImageProcessor instead." , UpperCamelCase__ , ) super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
48
import random from .binary_exp_mod import bin_exp_mod def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=1000 ) -> List[str]: if n < 2: return False if n % 2 == 0: return n == 2 # this means n is odd lowerCamelCase : List[Any] = n - 1 lowerCamelCase : Dict = 0 while d % 2 == 0: d /= 2 exp += 1 # n - 1=d*(2**exp) lowerCamelCase : Optional[Any] = 0 while count < prec: lowerCamelCase : str = random.randint(2 ,n - 1 ) lowerCamelCase : Dict = bin_exp_mod(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) if b != 1: lowerCamelCase : str = True for _ in range(_SCREAMING_SNAKE_CASE ): if b == n - 1: lowerCamelCase : Tuple = False break lowerCamelCase : int = b * b b %= n if flag: return False count += 1 return True if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Optional[int] = abs(int(input('Enter bound : ').strip())) print('Here\'s the list of primes:') print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
48
1
import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=30 , UpperCamelCase__=2 , UpperCamelCase__=3 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=10 , UpperCamelCase__=0.02 , ) -> List[Any]: lowerCamelCase : List[Any] = parent lowerCamelCase : Optional[int] = batch_size lowerCamelCase : Optional[Any] = image_size lowerCamelCase : int = patch_size lowerCamelCase : Optional[Any] = num_channels lowerCamelCase : Optional[Any] = is_training lowerCamelCase : List[str] = use_labels lowerCamelCase : Tuple = hidden_size lowerCamelCase : str = num_hidden_layers lowerCamelCase : Union[str, Any] = num_attention_heads lowerCamelCase : Union[str, Any] = intermediate_size lowerCamelCase : str = hidden_act lowerCamelCase : str = hidden_dropout_prob lowerCamelCase : Dict = attention_probs_dropout_prob lowerCamelCase : Dict = type_sequence_label_size lowerCamelCase : Union[str, Any] = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) lowerCamelCase : Any = (image_size // patch_size) ** 2 lowerCamelCase : str = num_patches + 1 def _lowercase ( self ) -> List[str]: lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase : Optional[Any] = ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , ) return config, pixel_values def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]: lowerCamelCase : Any = FlaxViTModel(config=UpperCamelCase__ ) lowerCamelCase : List[Any] = model(UpperCamelCase__ ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) lowerCamelCase : Any = (self.image_size, self.image_size) lowerCamelCase : Union[str, Any] = (self.patch_size, self.patch_size) lowerCamelCase : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) ) def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> int: lowerCamelCase : str = self.type_sequence_label_size lowerCamelCase : Optional[Any] = FlaxViTForImageClassification(config=UpperCamelCase__ ) lowerCamelCase : Any = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCamelCase : Tuple = 1 lowerCamelCase : Any = FlaxViTForImageClassification(UpperCamelCase__ ) lowerCamelCase : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase : Tuple = model(UpperCamelCase__ ) def _lowercase ( self ) -> int: lowerCamelCase : Optional[int] = self.prepare_config_and_inputs() ( ( lowerCamelCase ) , ( lowerCamelCase ) , ) : Any = config_and_inputs lowerCamelCase : Optional[int] = {"pixel_values": pixel_values} return config, inputs_dict @require_flax class UpperCamelCase__ (lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase_ : Optional[int] = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def _lowercase ( self ) -> None: lowerCamelCase : Any = FlaxViTModelTester(self ) lowerCamelCase : Tuple = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 ) def _lowercase ( self ) -> int: self.config_tester.run_common_tests() def _lowercase ( self ) -> Union[str, Any]: lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def _lowercase ( self ) -> Dict: lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ ) def _lowercase ( self ) -> Dict: lowerCamelCase , lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : Dict = model_class(UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase : Tuple = [*signature.parameters.keys()] lowerCamelCase : str = ["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def _lowercase ( self ) -> Dict: lowerCamelCase , lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCamelCase : Dict = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = model_class(UpperCamelCase__ ) @jax.jit def model_jitted(UpperCamelCase__ , **UpperCamelCase__ ): return model(pixel_values=UpperCamelCase__ , **UpperCamelCase__ ) with self.subTest("JIT Enabled" ): lowerCamelCase : Optional[Any] = model_jitted(**UpperCamelCase__ ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): lowerCamelCase : List[Any] = model_jitted(**UpperCamelCase__ ).to_tuple() self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) ) for jitted_output, output in zip(UpperCamelCase__ , UpperCamelCase__ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def _lowercase ( self ) -> List[Any]: for model_class_name in self.all_model_classes: lowerCamelCase : str = model_class_name.from_pretrained("google/vit-base-patch16-224" ) lowerCamelCase : Optional[int] = model(np.ones((1, 3, 224, 224) ) ) self.assertIsNotNone(UpperCamelCase__ )
48
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Tuple = {'vocab_file': 'spiece.model'} SCREAMING_SNAKE_CASE__ : int = { 'vocab_file': { 'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model', 'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model', } } SCREAMING_SNAKE_CASE__ : str = { 'xlnet-base-cased': None, 'xlnet-large-cased': None, } # Segments (not really needed) SCREAMING_SNAKE_CASE__ : Dict = 0 SCREAMING_SNAKE_CASE__ : Tuple = 1 SCREAMING_SNAKE_CASE__ : Optional[int] = 2 SCREAMING_SNAKE_CASE__ : List[str] = 3 SCREAMING_SNAKE_CASE__ : Optional[int] = 4 class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Dict = VOCAB_FILES_NAMES lowerCamelCase_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase_ : List[str] = """left""" def __init__( self , UpperCamelCase__ , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__="<s>" , UpperCamelCase__="</s>" , UpperCamelCase__="<unk>" , UpperCamelCase__="<sep>" , UpperCamelCase__="<pad>" , UpperCamelCase__="<cls>" , UpperCamelCase__="<mask>" , UpperCamelCase__=["<eop>", "<eod>"] , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> None: # Mask token behave like a normal word, i.e. include the space before it lowerCamelCase : str = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token lowerCamelCase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , ) lowerCamelCase : Any = 3 lowerCamelCase : Optional[Any] = do_lower_case lowerCamelCase : List[Any] = remove_space lowerCamelCase : str = keep_accents lowerCamelCase : List[Any] = vocab_file lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCamelCase__ ) @property def _lowercase ( self ) -> Optional[Any]: return len(self.sp_model ) def _lowercase ( self ) -> Optional[int]: lowerCamelCase : int = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Optional[Any]: lowerCamelCase : Optional[int] = self.__dict__.copy() lowerCamelCase : Union[str, Any] = None return state def __setstate__( self , UpperCamelCase__ ) -> int: lowerCamelCase : int = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): lowerCamelCase : Any = {} lowerCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _lowercase ( self , UpperCamelCase__ ) -> Any: if self.remove_space: lowerCamelCase : Dict = " ".join(inputs.strip().split() ) else: lowerCamelCase : Union[str, Any] = inputs lowerCamelCase : Optional[Any] = outputs.replace("``" , "\"" ).replace("''" , "\"" ) if not self.keep_accents: lowerCamelCase : Optional[int] = unicodedata.normalize("NFKD" , UpperCamelCase__ ) lowerCamelCase : List[Any] = "".join([c for c in outputs if not unicodedata.combining(UpperCamelCase__ )] ) if self.do_lower_case: lowerCamelCase : List[str] = outputs.lower() return outputs def _lowercase ( self , UpperCamelCase__ ) -> List[str]: lowerCamelCase : Optional[Any] = self.preprocess_text(UpperCamelCase__ ) lowerCamelCase : Dict = self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ ) lowerCamelCase : Dict = [] for piece in pieces: if len(UpperCamelCase__ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit(): lowerCamelCase : List[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase__ , "" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: lowerCamelCase : Union[str, Any] = cur_pieces[1:] else: lowerCamelCase : Optional[int] = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(UpperCamelCase__ ) else: new_pieces.append(UpperCamelCase__ ) return new_pieces def _lowercase ( self , UpperCamelCase__ ) -> int: return self.sp_model.PieceToId(UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ ) -> Tuple: return self.sp_model.IdToPiece(UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ ) -> List[str]: lowerCamelCase : Union[str, Any] = "".join(UpperCamelCase__ ).replace(UpperCamelCase__ , " " ).strip() return out_string def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = True , **UpperCamelCase__ , ) -> str: lowerCamelCase : Optional[int] = kwargs.pop("use_source_tokenizer" , UpperCamelCase__ ) lowerCamelCase : Optional[int] = self.convert_ids_to_tokens(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. https://github.com/huggingface/transformers/issues/1133 lowerCamelCase : Any = [] lowerCamelCase : Any = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(UpperCamelCase__ ) ) lowerCamelCase : int = [] sub_texts.append(UpperCamelCase__ ) else: current_sub_text.append(UpperCamelCase__ ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(UpperCamelCase__ ) ) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens lowerCamelCase : Union[str, Any] = "".join(UpperCamelCase__ ) lowerCamelCase : Tuple = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: lowerCamelCase : int = self.clean_up_tokenization(UpperCamelCase__ ) return clean_text else: return text def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]: lowerCamelCase : str = [self.sep_token_id] lowerCamelCase : Optional[int] = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ ) if token_ids_a is not None: return ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1, 1] return ([0] * len(UpperCamelCase__ )) + [1, 1] def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]: lowerCamelCase : Any = [self.sep_token_id] lowerCamelCase : List[str] = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]: if not os.path.isdir(UpperCamelCase__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCamelCase : Union[str, Any] = os.path.join( UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase__ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase__ , "wb" ) as fi: lowerCamelCase : str = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase__ ) return (out_vocab_file,)
48
1
import os import sys import tempfile import torch from .state import AcceleratorState from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=() ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE="no" ,_SCREAMING_SNAKE_CASE="29500" ) -> Optional[int]: lowerCamelCase : Optional[Any] = False lowerCamelCase : int = False if any(key.startswith("KAGGLE" ) for key in os.environ.keys() ): lowerCamelCase : Dict = True elif "IPython" in sys.modules: lowerCamelCase : Dict = "google.colab" in str(sys.modules["IPython"].get_ipython() ) try: lowerCamelCase : Tuple = PrecisionType(mixed_precision.lower() ) except ValueError: raise ValueError( f'''Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' ) if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME" ,_SCREAMING_SNAKE_CASE ) is not None): # TPU launch import torch_xla.distributed.xla_multiprocessing as xmp if len(AcceleratorState._shared_state ) > 0: raise ValueError( "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside " "your training function. Restart your notebook and make sure no cells initializes an " "`Accelerator`." ) if num_processes is None: lowerCamelCase : Any = 8 lowerCamelCase : Tuple = PrepareForLaunch(_SCREAMING_SNAKE_CASE ,distributed_type="TPU" ) print(f'''Launching a training on {num_processes} TPU cores.''' ) xmp.spawn(_SCREAMING_SNAKE_CASE ,args=_SCREAMING_SNAKE_CASE ,nprocs=_SCREAMING_SNAKE_CASE ,start_method="fork" ) elif in_colab: # No need for a distributed launch otherwise as it's either CPU or one GPU. if torch.cuda.is_available(): print("Launching training on one GPU." ) else: print("Launching training on one CPU." ) function(*_SCREAMING_SNAKE_CASE ) else: if num_processes is None: raise ValueError( "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." ) if num_processes > 1: # Multi-GPU launch from torch.multiprocessing import start_processes from torch.multiprocessing.spawn import ProcessRaisedException if len(AcceleratorState._shared_state ) > 0: raise ValueError( "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized " "inside your training function. Restart your notebook and make sure no cells initializes an " "`Accelerator`." ) if torch.cuda.is_initialized(): raise ValueError( "To launch a multi-GPU training from your notebook, you need to avoid running any instruction " "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA " "function." ) # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=_SCREAMING_SNAKE_CASE ,master_addr="127.0.01" ,master_port=_SCREAMING_SNAKE_CASE ,mixed_precision=_SCREAMING_SNAKE_CASE ): lowerCamelCase : List[str] = PrepareForLaunch(_SCREAMING_SNAKE_CASE ,distributed_type="MULTI_GPU" ) print(f'''Launching training on {num_processes} GPUs.''' ) try: start_processes(_SCREAMING_SNAKE_CASE ,args=_SCREAMING_SNAKE_CASE ,nprocs=_SCREAMING_SNAKE_CASE ,start_method="fork" ) except ProcessRaisedException as e: if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]: raise RuntimeError( "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. " "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. " "Please review your imports and test them when running the `notebook_launcher()` to identify " "which one is problematic." ) from e else: # No need for a distributed launch otherwise as it's either CPU, GPU or MPS. if is_mps_available(): lowerCamelCase : List[str] = "1" print("Launching training on MPS." ) elif torch.cuda.is_available(): print("Launching training on one GPU." ) else: print("Launching training on CPU." ) function(*_SCREAMING_SNAKE_CASE ) def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=() ,_SCREAMING_SNAKE_CASE=2 ) -> List[Any]: from torch.multiprocessing import start_processes with tempfile.NamedTemporaryFile() as tmp_file: # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=_SCREAMING_SNAKE_CASE ,master_addr="127.0.01" ,master_port="29500" ,accelerate_mixed_precision="no" ,accelerate_debug_rdv_file=tmp_file.name ,accelerate_use_cpu="yes" ,): lowerCamelCase : List[Any] = PrepareForLaunch(_SCREAMING_SNAKE_CASE ,debug=_SCREAMING_SNAKE_CASE ) start_processes(_SCREAMING_SNAKE_CASE ,args=_SCREAMING_SNAKE_CASE ,nprocs=_SCREAMING_SNAKE_CASE ,start_method="fork" )
48
import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Tuple = { 'b0': efficientnet.EfficientNetBa, 'b1': efficientnet.EfficientNetBa, 'b2': efficientnet.EfficientNetBa, 'b3': efficientnet.EfficientNetBa, 'b4': efficientnet.EfficientNetBa, 'b5': efficientnet.EfficientNetBa, 'b6': efficientnet.EfficientNetBa, 'b7': efficientnet.EfficientNetBa, } SCREAMING_SNAKE_CASE__ : Any = { 'b0': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.0, 'image_size': 224, 'dropout_rate': 0.2, 'dw_padding': [], }, 'b1': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.1, 'image_size': 240, 'dropout_rate': 0.2, 'dw_padding': [16], }, 'b2': { 'hidden_dim': 1408, 'width_coef': 1.1, 'depth_coef': 1.2, 'image_size': 260, 'dropout_rate': 0.3, 'dw_padding': [5, 8, 16], }, 'b3': { 'hidden_dim': 1536, 'width_coef': 1.2, 'depth_coef': 1.4, 'image_size': 300, 'dropout_rate': 0.3, 'dw_padding': [5, 18], }, 'b4': { 'hidden_dim': 1792, 'width_coef': 1.4, 'depth_coef': 1.8, 'image_size': 380, 'dropout_rate': 0.4, 'dw_padding': [6], }, 'b5': { 'hidden_dim': 2048, 'width_coef': 1.6, 'depth_coef': 2.2, 'image_size': 456, 'dropout_rate': 0.4, 'dw_padding': [13, 27], }, 'b6': { 'hidden_dim': 2304, 'width_coef': 1.8, 'depth_coef': 2.6, 'image_size': 528, 'dropout_rate': 0.5, 'dw_padding': [31], }, 'b7': { 'hidden_dim': 2560, 'width_coef': 2.0, 'depth_coef': 3.1, 'image_size': 600, 'dropout_rate': 0.5, 'dw_padding': [18], }, } def A ( _SCREAMING_SNAKE_CASE ) -> str: lowerCamelCase : int = EfficientNetConfig() lowerCamelCase : List[str] = CONFIG_MAP[model_name]["hidden_dim"] lowerCamelCase : List[str] = CONFIG_MAP[model_name]["width_coef"] lowerCamelCase : Any = CONFIG_MAP[model_name]["depth_coef"] lowerCamelCase : Union[str, Any] = CONFIG_MAP[model_name]["image_size"] lowerCamelCase : Optional[int] = CONFIG_MAP[model_name]["dropout_rate"] lowerCamelCase : str = CONFIG_MAP[model_name]["dw_padding"] lowerCamelCase : Tuple = "huggingface/label-files" lowerCamelCase : List[str] = "imagenet-1k-id2label.json" lowerCamelCase : Any = 1000 lowerCamelCase : Any = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,repo_type="dataset" ) ,"r" ) ) lowerCamelCase : List[str] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowerCamelCase : Tuple = idalabel lowerCamelCase : Any = {v: k for k, v in idalabel.items()} return config def A ( ) -> int: lowerCamelCase : str = "http://images.cocodataset.org/val2017/000000039769.jpg" lowerCamelCase : Tuple = Image.open(requests.get(_SCREAMING_SNAKE_CASE ,stream=_SCREAMING_SNAKE_CASE ).raw ) return im def A ( _SCREAMING_SNAKE_CASE ) -> str: lowerCamelCase : List[Any] = CONFIG_MAP[model_name]["image_size"] lowerCamelCase : str = EfficientNetImageProcessor( size={"height": size, "width": size} ,image_mean=[0.485, 0.456, 0.406] ,image_std=[0.47853944, 0.4732864, 0.47434163] ,do_center_crop=_SCREAMING_SNAKE_CASE ,) return preprocessor def A ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: lowerCamelCase : Any = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )] lowerCamelCase : Any = sorted(set(_SCREAMING_SNAKE_CASE ) ) lowerCamelCase : Dict = len(_SCREAMING_SNAKE_CASE ) lowerCamelCase : List[Any] = {b: str(_SCREAMING_SNAKE_CASE ) for b, i in zip(_SCREAMING_SNAKE_CASE ,range(_SCREAMING_SNAKE_CASE ) )} lowerCamelCase : List[Any] = [] rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") ) rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") ) rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") ) rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") ) rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") ) for b in block_names: lowerCamelCase : Dict = block_name_mapping[b] rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") ) rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") ) rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") ) rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") ) rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") ) lowerCamelCase : Optional[int] = {} for item in rename_keys: if item[0] in original_param_names: lowerCamelCase : List[str] = "efficientnet." + item[1] lowerCamelCase : int = "classifier.weight" lowerCamelCase : Union[str, Any] = "classifier.bias" return key_mapping def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Dict: for key, value in tf_params.items(): if "normalization" in key: continue lowerCamelCase : Tuple = key_mapping[key] if "_conv" in key and "kernel" in key: lowerCamelCase : List[Any] = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(3 ,2 ,0 ,1 ) elif "depthwise_kernel" in key: lowerCamelCase : int = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(2 ,3 ,0 ,1 ) elif "kernel" in key: lowerCamelCase : List[str] = torch.from_numpy(np.transpose(_SCREAMING_SNAKE_CASE ) ) else: lowerCamelCase : Optional[Any] = torch.from_numpy(_SCREAMING_SNAKE_CASE ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_SCREAMING_SNAKE_CASE ) @torch.no_grad() def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[int]: lowerCamelCase : Optional[int] = model_classes[model_name]( include_top=_SCREAMING_SNAKE_CASE ,weights="imagenet" ,input_tensor=_SCREAMING_SNAKE_CASE ,input_shape=_SCREAMING_SNAKE_CASE ,pooling=_SCREAMING_SNAKE_CASE ,classes=1000 ,classifier_activation="softmax" ,) lowerCamelCase : List[Any] = original_model.trainable_variables lowerCamelCase : Tuple = original_model.non_trainable_variables lowerCamelCase : Union[str, Any] = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: lowerCamelCase : List[str] = param.numpy() lowerCamelCase : int = list(tf_params.keys() ) # Load HuggingFace model lowerCamelCase : Union[str, Any] = get_efficientnet_config(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Optional[int] = EfficientNetForImageClassification(_SCREAMING_SNAKE_CASE ).eval() lowerCamelCase : Tuple = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("Converting parameters..." ) lowerCamelCase : Union[str, Any] = rename_keys(_SCREAMING_SNAKE_CASE ) replace_params(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) # Initialize preprocessor and preprocess input image lowerCamelCase : int = convert_image_processor(_SCREAMING_SNAKE_CASE ) lowerCamelCase : int = preprocessor(images=prepare_img() ,return_tensors="pt" ) # HF model inference hf_model.eval() with torch.no_grad(): lowerCamelCase : Optional[Any] = hf_model(**_SCREAMING_SNAKE_CASE ) lowerCamelCase : str = outputs.logits.detach().numpy() # Original model inference lowerCamelCase : Optional[Any] = False lowerCamelCase : Any = CONFIG_MAP[model_name]["image_size"] lowerCamelCase : Optional[int] = prepare_img().resize((image_size, image_size) ,resample=PIL.Image.NEAREST ) lowerCamelCase : Union[str, Any] = image.img_to_array(_SCREAMING_SNAKE_CASE ) lowerCamelCase : str = np.expand_dims(_SCREAMING_SNAKE_CASE ,axis=0 ) lowerCamelCase : Dict = original_model.predict(_SCREAMING_SNAKE_CASE ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,atol=1e-3 ), "The predicted logits are not the same." print("Model outputs match!" ) if save_model: # Create folder to save model if not os.path.isdir(_SCREAMING_SNAKE_CASE ): os.mkdir(_SCREAMING_SNAKE_CASE ) # Save converted model and image processor hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) preprocessor.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: # Push model and image processor to hub print(f'''Pushing converted {model_name} to the hub...''' ) lowerCamelCase : int = f'''efficientnet-{model_name}''' preprocessor.push_to_hub(_SCREAMING_SNAKE_CASE ) hf_model.push_to_hub(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='b0', type=str, help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].', ) parser.add_argument( '--pytorch_dump_folder_path', default='hf_model', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--save_model', action='store_true', help='Save model to local') parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
48
1
import json import os import tempfile from unittest.mock import patch import torch from torch.utils.data import DataLoader, TensorDataset from accelerate import DistributedType, infer_auto_device_map, init_empty_weights from accelerate.accelerator import Accelerator from accelerate.state import GradientState, PartialState from accelerate.test_utils import require_bnb, require_multi_gpu, slow from accelerate.test_utils.testing import AccelerateTestCase, require_cuda from accelerate.utils import patch_environment def A ( ) -> Optional[Any]: lowerCamelCase : str = torch.nn.Linear(2 ,4 ) lowerCamelCase : Optional[int] = torch.optim.AdamW(model.parameters() ,lr=1.0 ) lowerCamelCase : Optional[int] = torch.optim.lr_scheduler.OneCycleLR(_SCREAMING_SNAKE_CASE ,max_lr=0.01 ,steps_per_epoch=2 ,epochs=1 ) lowerCamelCase : Optional[int] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) ) lowerCamelCase : Union[str, Any] = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) ) return model, optimizer, scheduler, train_dl, valid_dl def A ( _SCREAMING_SNAKE_CASE ) -> Optional[int]: return (model.weight.abs().sum() + model.bias.abs().sum()).item() def A ( _SCREAMING_SNAKE_CASE ) -> List[Any]: lowerCamelCase : Optional[Any] = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict() model.load_state_dict(_SCREAMING_SNAKE_CASE ) class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' @require_cuda def _lowercase ( self ) -> Any: lowerCamelCase : Optional[Any] = Accelerator() assert PartialState._shared_state["_cpu"] is False assert PartialState._shared_state["device"].type == "cuda" with self.assertRaises(UpperCamelCase__ ): lowerCamelCase : List[str] = Accelerator(cpu=UpperCamelCase__ ) def _lowercase ( self ) -> int: lowerCamelCase : Optional[Any] = Accelerator() lowerCamelCase : Optional[Any] = GradientState() assert state.num_steps == 1 lowerCamelCase : Any = 4 assert state.num_steps == 4 assert state.sync_gradients is True lowerCamelCase : Dict = False assert state.sync_gradients is False GradientState._reset_state() def _lowercase ( self ) -> Dict: lowerCamelCase : Tuple = Accelerator() lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = create_components() ( ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ) : List[Any] = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) self.assertTrue(prepared_model in accelerator._models ) self.assertTrue(prepared_optimizer in accelerator._optimizers ) self.assertTrue(prepared_scheduler in accelerator._schedulers ) self.assertTrue(prepared_train_dl in accelerator._dataloaders ) self.assertTrue(prepared_valid_dl in accelerator._dataloaders ) def _lowercase ( self ) -> Union[str, Any]: lowerCamelCase : str = Accelerator() lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : List[str] = create_components() accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) accelerator.free_memory() self.assertTrue(len(accelerator._models ) == 0 ) self.assertTrue(len(accelerator._optimizers ) == 0 ) self.assertTrue(len(accelerator._schedulers ) == 0 ) self.assertTrue(len(accelerator._dataloaders ) == 0 ) def _lowercase ( self ) -> int: PartialState._reset_state() # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist def noop(*UpperCamelCase__ , **UpperCamelCase__ ): pass with patch("torch.cuda.set_device" , UpperCamelCase__ ), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64" ): lowerCamelCase : List[Any] = Accelerator() self.assertEqual(str(accelerator.state.device ) , "cuda:64" ) def _lowercase ( self ) -> Dict: lowerCamelCase : Union[str, Any] = Accelerator() lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = create_components() accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase : List[str] = get_signature(UpperCamelCase__ ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(UpperCamelCase__ ) # make sure random weights don't match load_random_weights(UpperCamelCase__ ) self.assertTrue(abs(model_signature - get_signature(UpperCamelCase__ ) ) > 1e-3 ) # make sure loaded weights match accelerator.load_state(UpperCamelCase__ ) self.assertTrue(abs(model_signature - get_signature(UpperCamelCase__ ) ) < 1e-3 ) def _lowercase ( self ) -> List[str]: lowerCamelCase : List[str] = Accelerator() lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : List[str] = create_components() accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase : Dict = get_signature(UpperCamelCase__ ) # saving hook def save_config(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): lowerCamelCase : Union[str, Any] = {"class_name": models[0].__class__.__name__} with open(os.path.join(UpperCamelCase__ , "data.json" ) , "w" ) as f: json.dump(UpperCamelCase__ , UpperCamelCase__ ) # loading hook def load_config(UpperCamelCase__ , UpperCamelCase__ ): with open(os.path.join(UpperCamelCase__ , "data.json" ) , "r" ) as f: lowerCamelCase : str = json.load(UpperCamelCase__ ) lowerCamelCase : int = config["class_name"] lowerCamelCase : Any = accelerator.register_save_state_pre_hook(UpperCamelCase__ ) lowerCamelCase : Dict = accelerator.register_load_state_pre_hook(UpperCamelCase__ ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(UpperCamelCase__ ) # make sure random weights don't match with hooks load_random_weights(UpperCamelCase__ ) self.assertTrue(abs(model_signature - get_signature(UpperCamelCase__ ) ) > 1e-3 ) # random class name to verify correct one is loaded lowerCamelCase : Optional[int] = "random" # make sure loaded weights match with hooks accelerator.load_state(UpperCamelCase__ ) self.assertTrue(abs(model_signature - get_signature(UpperCamelCase__ ) ) < 1e-3 ) # mode.class_name is loaded from config self.assertTrue(model.class_name == model.__class__.__name__ ) # remove hooks save_hook.remove() load_hook.remove() with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(UpperCamelCase__ ) # make sure random weights don't match with hooks removed load_random_weights(UpperCamelCase__ ) self.assertTrue(abs(model_signature - get_signature(UpperCamelCase__ ) ) > 1e-3 ) # random class name to verify correct one is loaded lowerCamelCase : List[Any] = "random" # make sure loaded weights match with hooks removed accelerator.load_state(UpperCamelCase__ ) self.assertTrue(abs(model_signature - get_signature(UpperCamelCase__ ) ) < 1e-3 ) # mode.class_name is NOT loaded from config self.assertTrue(model.class_name != model.__class__.__name__ ) def _lowercase ( self ) -> int: lowerCamelCase : Optional[Any] = Accelerator() lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Dict = create_components() lowerCamelCase : Optional[int] = None # This should work lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = accelerator.prepare( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) self.assertTrue(dummy_obj is None ) def _lowercase ( self ) -> List[Any]: lowerCamelCase : List[str] = Accelerator() lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Dict = create_components() lowerCamelCase : Optional[int] = [1, 2, 3] # This should work lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Any = accelerator.prepare( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual( getattr(UpperCamelCase__ , "_is_accelerate_prepared" , UpperCamelCase__ ) , UpperCamelCase__ , "Dummy object should have `_is_accelerate_prepared` set to `True`" , ) self.assertEqual( getattr(UpperCamelCase__ , "_is_accelerate_prepared" , UpperCamelCase__ ) , UpperCamelCase__ , "Model is missing `_is_accelerator_prepared` or is set to `False`" , ) self.assertEqual( getattr(UpperCamelCase__ , "_is_accelerate_prepared" , UpperCamelCase__ ) , UpperCamelCase__ , "Optimizer is missing `_is_accelerator_prepared` or is set to `False`" , ) self.assertEqual( getattr(UpperCamelCase__ , "_is_accelerate_prepared" , UpperCamelCase__ ) , UpperCamelCase__ , "Scheduler is missing `_is_accelerator_prepared` or is set to `False`" , ) self.assertEqual( getattr(UpperCamelCase__ , "_is_accelerate_prepared" , UpperCamelCase__ ) , UpperCamelCase__ , "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , ) self.assertEqual( getattr(UpperCamelCase__ , "_is_accelerate_prepared" , UpperCamelCase__ ) , UpperCamelCase__ , "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , ) @slow @require_bnb def _lowercase ( self ) -> int: from transformers import AutoModelForCausalLM lowerCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , load_in_abit=UpperCamelCase__ , device_map={"": 0} , ) lowerCamelCase : Any = Accelerator() # This should work lowerCamelCase : Tuple = accelerator.prepare(UpperCamelCase__ ) @slow @require_bnb def _lowercase ( self ) -> str: from transformers import AutoModelForCausalLM lowerCamelCase : Tuple = Accelerator() with init_empty_weights(): lowerCamelCase : Tuple = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , ) model.tie_weights() lowerCamelCase : List[Any] = infer_auto_device_map(UpperCamelCase__ ) lowerCamelCase : Optional[int] = "cpu" lowerCamelCase : Tuple = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , device_map=UpperCamelCase__ , load_in_abit=UpperCamelCase__ , llm_inta_enable_fpaa_cpu_offload=UpperCamelCase__ ) # This should not work and get value error with self.assertRaises(UpperCamelCase__ ): lowerCamelCase : List[str] = accelerator.prepare(UpperCamelCase__ ) @slow @require_bnb @require_multi_gpu def _lowercase ( self ) -> int: from transformers import AutoModelForCausalLM lowerCamelCase : Union[str, Any] = {"distributed_type": DistributedType.MULTI_GPU} with init_empty_weights(): lowerCamelCase : str = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , ) model.tie_weights() lowerCamelCase : Union[str, Any] = infer_auto_device_map(UpperCamelCase__ ) lowerCamelCase : Dict = 1 lowerCamelCase : List[Any] = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , load_in_abit=UpperCamelCase__ , device_map=UpperCamelCase__ , ) lowerCamelCase : Optional[Any] = Accelerator() # This should not work and get value error with self.assertRaises(UpperCamelCase__ ): lowerCamelCase : Union[str, Any] = accelerator.prepare(UpperCamelCase__ ) PartialState._reset_state() @slow @require_bnb @require_multi_gpu def _lowercase ( self ) -> str: from transformers import AutoModelForCausalLM with init_empty_weights(): lowerCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , ) lowerCamelCase : int = infer_auto_device_map(UpperCamelCase__ ) lowerCamelCase : Any = 1 lowerCamelCase : Tuple = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , load_in_abit=UpperCamelCase__ , device_map=UpperCamelCase__ , ) lowerCamelCase : str = Accelerator() # This should work lowerCamelCase : Union[str, Any] = accelerator.prepare(UpperCamelCase__ ) @require_cuda def _lowercase ( self ) -> Dict: lowerCamelCase : str = torch.nn.Linear(10 , 10 ) lowerCamelCase : int = torch.optim.SGD(model.parameters() , lr=0.01 ) lowerCamelCase : int = Accelerator(cpu=UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = accelerator.prepare(UpperCamelCase__ )
48
import argparse from pathlib import Path from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,) -> List[str]: if config_name_or_path is None: lowerCamelCase : Any = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base" if generator_tokenizer_name_or_path is None: lowerCamelCase : Dict = generator_name_or_path if question_encoder_tokenizer_name_or_path is None: lowerCamelCase : Any = question_encoder_name_or_path lowerCamelCase : str = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration # Save model. lowerCamelCase : List[Any] = RagConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Optional[Any] = gen_config lowerCamelCase : Optional[Any] = question_encoder_config lowerCamelCase : List[Any] = model_class.from_pretrained_question_encoder_generator( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,config=_SCREAMING_SNAKE_CASE ) rag_model.save_pretrained(_SCREAMING_SNAKE_CASE ) # Sanity check. model_class.from_pretrained(_SCREAMING_SNAKE_CASE ) # Save tokenizers. lowerCamelCase : List[str] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" ) lowerCamelCase : int = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Any = argparse.ArgumentParser() parser.add_argument( '--model_type', choices=['rag_sequence', 'rag_token'], required=True, type=str, help='RAG model type: rag_sequence, rag_token', ) parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.') parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier') parser.add_argument( '--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier' ) parser.add_argument( '--generator_tokenizer_name_or_path', type=str, help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``', ) parser.add_argument( '--question_encoder_tokenizer_name_or_path', type=str, help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``', ) parser.add_argument( '--config_name_or_path', type=str, help=( 'Identifier of the model config to use, if not provided, resolves to a base config for a given' ' ``model_type``' ), ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args() SCREAMING_SNAKE_CASE__ : Optional[Any] = Path(args.dest) dest_dir.mkdir(exist_ok=True) consolidate( args.model_type, args.generator_name_or_path, args.question_encoder_name_or_path, dest_dir, args.config_name_or_path, args.generator_tokenizer_name_or_path, args.question_encoder_tokenizer_name_or_path, )
48
1
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input SCREAMING_SNAKE_CASE__ : Dict = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine' def A ( ) -> Optional[Any]: lowerCamelCase : Union[str, Any] = _ask_options( "In which compute environment are you running?" ,["This machine", "AWS (Amazon SageMaker)"] ,_convert_compute_environment ,) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: lowerCamelCase : Union[str, Any] = get_sagemaker_input() else: lowerCamelCase : Tuple = get_cluster_input() return config def A ( _SCREAMING_SNAKE_CASE=None ) -> str: if subparsers is not None: lowerCamelCase : int = subparsers.add_parser("config" ,description=_SCREAMING_SNAKE_CASE ) else: lowerCamelCase : Optional[Any] = argparse.ArgumentParser("Accelerate config command" ,description=_SCREAMING_SNAKE_CASE ) parser.add_argument( "--config_file" ,default=_SCREAMING_SNAKE_CASE ,help=( "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " "with 'huggingface'." ) ,) if subparsers is not None: parser.set_defaults(func=_SCREAMING_SNAKE_CASE ) return parser def A ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: lowerCamelCase : int = get_user_input() if args.config_file is not None: lowerCamelCase : Optional[int] = args.config_file else: if not os.path.isdir(_SCREAMING_SNAKE_CASE ): os.makedirs(_SCREAMING_SNAKE_CASE ) lowerCamelCase : Tuple = default_yaml_config_file if config_file.endswith(".json" ): config.to_json_file(_SCREAMING_SNAKE_CASE ) else: config.to_yaml_file(_SCREAMING_SNAKE_CASE ) print(f'''accelerate configuration saved at {config_file}''' ) def A ( ) -> List[str]: lowerCamelCase : int = config_command_parser() lowerCamelCase : Tuple = parser.parse_args() config_command(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
48
import math def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> float: if ( not isinstance(_SCREAMING_SNAKE_CASE ,(int, float) ) or power_factor < -1 or power_factor > 1 ): raise ValueError("power_factor must be a valid float value between -1 and 1." ) return apparent_power * power_factor def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> float: if ( not isinstance(_SCREAMING_SNAKE_CASE ,(int, float) ) or power_factor < -1 or power_factor > 1 ): raise ValueError("power_factor must be a valid float value between -1 and 1." ) return apparent_power * math.sqrt(1 - power_factor**2 ) if __name__ == "__main__": import doctest doctest.testmod()
48
1