Columns (name, type, value range):

  code                     string   (length 87 to 55.2k)
  code_codestyle           int64    (0 to 349)
  style_context            string   (length 135 to 49.1k)
  style_context_codestyle  int64    (0 to 349)
  label                    int64    (0 to 1)
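The rows that follow pair a code snippet with a style-context snippet, each tagged with a style id, plus a binary label. As a minimal sketch of how such rows could be inspected (assuming the data lives in a Hugging Face `datasets` repository with exactly these columns; the dataset path below is a hypothetical placeholder, not the actual repository name):

from datasets import load_dataset

# Hypothetical path; substitute the real dataset repository name.
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(len(row["code"]), row["code_codestyle"])                     # snippet length and its style id
print(len(row["style_context"]), row["style_context_codestyle"])   # context length and its style id
print(row["label"])                                                # binary label (0 or 1)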
import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') UpperCAmelCase_ : Dict = logging.getLogger(__name__) @dataclass class SCREAMING_SNAKE_CASE__ : snake_case__ : str = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) snake_case__ : Optional[str] = field( default=lowercase__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) snake_case__ : Optional[str] = field( default=lowercase__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) snake_case__ : Optional[str] = field( default=lowercase__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) snake_case__ : bool = field( default=lowercase__ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , ) snake_case__ : str = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) snake_case__ : bool = field( default=lowercase__ , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) @dataclass class SCREAMING_SNAKE_CASE__ : snake_case__ : Optional[str] = field(default=lowercase__ , metadata={'''help''': '''The input training data file (a text file).'''} ) snake_case__ : Optional[str] = field( default=lowercase__ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , ) snake_case__ : bool = field( default=lowercase__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) snake_case__ : Optional[int] = field( default=lowercase__ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , ) snake_case__ : Optional[int] = field( default=lowercase__ , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. If passed, sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) snake_case__ : bool = field( default=lowercase__ , metadata={ '''help''': ( '''Whether to pad all samples to the maximum sentence length. ''' '''If False, will pad the samples dynamically when batching to the maximum length in the batch. 
More ''' '''efficient on GPU but very bad for TPU.''' ) } , ) snake_case__ : Optional[int] = field( default=lowercase__ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) snake_case__ : Optional[int] = field( default=lowercase__ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: if self.train_file is not None: a_ : str = self.train_file.split('.' )[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: a_ : Dict = self.validation_file.split('.' )[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." @dataclass class SCREAMING_SNAKE_CASE__ : snake_case__ : PreTrainedTokenizerBase snake_case__ : Union[bool, str, PaddingStrategy] = True snake_case__ : Optional[int] = None snake_case__ : Optional[int] = None def __call__( self : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> str: a_ : str = 'label' if 'label' in features[0].keys() else 'labels' a_ : Union[str, Any] = [feature.pop(SCREAMING_SNAKE_CASE__ ) for feature in features] a_ : Any = len(SCREAMING_SNAKE_CASE__ ) a_ : int = len(features[0]['input_ids'] ) a_ : int = [ [{k: v[i] for k, v in feature.items()} for i in range(SCREAMING_SNAKE_CASE__ )] for feature in features ] a_ : Optional[Any] = list(chain(*SCREAMING_SNAKE_CASE__ ) ) a_ : Tuple = self.tokenizer.pad( SCREAMING_SNAKE_CASE__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , ) # Un-flatten a_ : Union[str, Any] = {k: v.view(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , -1 ) for k, v in batch.items()} # Add back labels a_ : Dict = torch.tensor(SCREAMING_SNAKE_CASE__ , dtype=torch.intaa ) return batch def SCREAMING_SNAKE_CASE_ ( ) -> str: """simple docstring""" a_ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. a_ , a_ , a_ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: a_ , a_ , a_ : Optional[int] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_swag' , __A , __A ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() a_ : Dict = training_args.get_process_log_level() logger.setLevel(__A ) datasets.utils.logging.set_verbosity(__A ) transformers.utils.logging.set_verbosity(__A ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(F"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. a_ : str = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: a_ : int = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: a_ : List[str] = {} if data_args.train_file is not None: a_ : int = data_args.train_file if data_args.validation_file is not None: a_ : Dict = data_args.validation_file a_ : Tuple = data_args.train_file.split('.' )[-1] a_ : List[str] = load_dataset( __A , data_files=__A , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: # Downloading and loading the swag dataset from the hub. a_ : Tuple = load_dataset( 'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
a_ : List[Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) a_ : List[Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) a_ : Optional[int] = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__A , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # When using your own dataset or a different dataset from swag, you will probably need to change this. a_ : int = [F"""ending{i}""" for i in range(4 )] a_ : Dict = 'sent1' a_ : Dict = 'sent2' if data_args.max_seq_length is None: a_ : int = tokenizer.model_max_length if max_seq_length > 10_24: logger.warning( 'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value' ' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can' ' override this default with `--block_size xxx`.' ) a_ : List[Any] = 10_24 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the""" F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" ) a_ : Union[str, Any] = min(data_args.max_seq_length , tokenizer.model_max_length ) # Preprocessing the datasets. 
def preprocess_function(__A : Tuple ): a_ : Optional[int] = [[context] * 4 for context in examples[context_name]] a_ : Union[str, Any] = examples[question_header_name] a_ : Optional[Any] = [ [F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(__A ) ] # Flatten out a_ : Optional[int] = list(chain(*__A ) ) a_ : Any = list(chain(*__A ) ) # Tokenize a_ : Any = tokenizer( __A , __A , truncation=__A , max_length=__A , padding='max_length' if data_args.pad_to_max_length else False , ) # Un-flatten return {k: [v[i : i + 4] for i in range(0 , len(__A ) , 4 )] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError('--do_train requires a train dataset' ) a_ : Optional[int] = raw_datasets['train'] if data_args.max_train_samples is not None: a_ : str = min(len(__A ) , data_args.max_train_samples ) a_ : List[str] = train_dataset.select(range(__A ) ) with training_args.main_process_first(desc='train dataset map pre-processing' ): a_ : int = train_dataset.map( __A , batched=__A , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError('--do_eval requires a validation dataset' ) a_ : Union[str, Any] = raw_datasets['validation'] if data_args.max_eval_samples is not None: a_ : List[str] = min(len(__A ) , data_args.max_eval_samples ) a_ : int = eval_dataset.select(range(__A ) ) with training_args.main_process_first(desc='validation dataset map pre-processing' ): a_ : int = eval_dataset.map( __A , batched=__A , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) # Data collator a_ : List[Any] = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=__A , pad_to_multiple_of=8 if training_args.fpaa else None ) ) # Metric def compute_metrics(__A : Optional[Any] ): a_ , a_ : List[Any] = eval_predictions a_ : Tuple = np.argmax(__A , axis=1 ) return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()} # Initialize our Trainer a_ : str = Trainer( model=__A , args=__A , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=__A , data_collator=__A , compute_metrics=__A , ) # Training if training_args.do_train: a_ : Any = None if training_args.resume_from_checkpoint is not None: a_ : Optional[Any] = training_args.resume_from_checkpoint elif last_checkpoint is not None: a_ : Optional[int] = last_checkpoint a_ : Dict = trainer.train(resume_from_checkpoint=__A ) trainer.save_model() # Saves the tokenizer too for easy upload a_ : Union[str, Any] = train_result.metrics a_ : Optional[int] = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(__A ) ) a_ : Any = min(__A , len(__A ) ) trainer.log_metrics('train' , __A ) trainer.save_metrics('train' , __A ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info('*** Evaluate ***' ) a_ : Dict = trainer.evaluate() a_ : List[str] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__A ) a_ : List[Any] = min(__A , len(__A ) ) trainer.log_metrics('eval' , __A ) trainer.save_metrics('eval' , __A ) a_ : Optional[Any] = { 'finetuned_from': model_args.model_name_or_path, 'tasks': 'multiple-choice', 'dataset_tags': 'swag', 'dataset_args': 'regular', 'dataset': 'SWAG', 'language': 'en', } if 
training_args.push_to_hub: trainer.push_to_hub(**__A ) else: trainer.create_model_card(**__A ) def SCREAMING_SNAKE_CASE_ ( __A : List[Any] ) -> Any: """simple docstring""" main() if __name__ == "__main__": main()
code_codestyle: 32
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class SCREAMING_SNAKE_CASE__ ( lowercase__ , unittest.TestCase ): snake_case__ : Optional[Any] = TextToVideoSDPipeline snake_case__ : Optional[int] = TEXT_TO_IMAGE_PARAMS snake_case__ : str = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. snake_case__ : Optional[Any] = frozenset( [ '''num_inference_steps''', '''generator''', '''latents''', '''return_dict''', '''callback''', '''callback_steps''', ] ) def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: torch.manual_seed(0 ) a_ : Optional[int] = UNetaDConditionModel( block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=3_2 , attention_head_dim=4 , ) a_ : int = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=SCREAMING_SNAKE_CASE__ , set_alpha_to_one=SCREAMING_SNAKE_CASE__ , ) torch.manual_seed(0 ) a_ : int = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) a_ : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , ) a_ : Dict = CLIPTextModel(SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) a_ : Union[str, Any] = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, } return components def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any]=0 ) -> List[str]: if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ): a_ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) else: a_ : Optional[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) a_ : int = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'pt', } return inputs def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: a_ : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator a_ : Dict = self.get_dummy_components() a_ : str = TextToVideoSDPipeline(**SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = sd_pipe.to(SCREAMING_SNAKE_CASE__ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Tuple = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) a_ : Dict = 'np' a_ : Dict = sd_pipe(**SCREAMING_SNAKE_CASE__ ).frames a_ : int = 
frames[0][-3:, -3:, -1] assert frames[0].shape == (6_4, 6_4, 3) a_ : Union[str, Any] = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: self._test_attention_slicing_forward_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE__ , expected_max_diff=3E-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def SCREAMING_SNAKE_CASE ( self : Any ) -> str: self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE__ , expected_max_diff=1E-2 ) @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: pass @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]: pass @unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' ) def SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]: pass def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: return super().test_progress_bar() @slow @skip_mps class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: a_ : str = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy' ) a_ : Any = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' ) a_ : Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) a_ : Optional[Any] = pipe.to('cuda' ) a_ : Any = 'Spiderman is surfing' a_ : List[Any] = torch.Generator(device='cpu' ).manual_seed(0 ) a_ : Optional[Any] = pipe(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2_5 , output_type='pt' ).frames a_ : str = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5E-2 def SCREAMING_SNAKE_CASE ( self : Any ) -> Any: a_ : Dict = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy' ) a_ : Tuple = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' ) a_ : Tuple = pipe.to('cuda' ) a_ : Any = 'Spiderman is surfing' a_ : List[str] = torch.Generator(device='cpu' ).manual_seed(0 ) a_ : List[Any] = pipe(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type='pt' ).frames a_ : List[str] = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5E-2
style_context_codestyle: 32
label: 1
def perfect_cube(n: int) -> bool:
    """Return True if n is a perfect cube."""
    # round() guards against floating-point error in the cube root
    val = round(n ** (1 / 3))
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))  # True
    print(perfect_cube(4))   # False
code_codestyle: 32
import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class SCREAMING_SNAKE_CASE__ ( lowercase__ , unittest.TestCase ): # TODO: is there an appropriate internal test set? snake_case__ : Any = '''ssube/stable-diffusion-x4-upscaler-onnx''' def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : int=0 ) -> Tuple: a_ : Union[str, Any] = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ) a_ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict: a_ : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = self.get_dummy_inputs() a_ : int = pipe(**SCREAMING_SNAKE_CASE__ ).images a_ : Tuple = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 5_1_2, 5_1_2, 3) a_ : List[Any] = np.array( [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: a_ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) a_ : int = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : List[str] = self.get_dummy_inputs() a_ : List[str] = pipe(**SCREAMING_SNAKE_CASE__ ).images a_ : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) a_ : str = np.array( [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: a_ : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) a_ : Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = self.get_dummy_inputs() a_ : Dict = pipe(**SCREAMING_SNAKE_CASE__ ).images a_ : str = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) a_ : Optional[Any] = np.array( [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: a_ : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) a_ : int = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : 
Optional[Any] = self.get_dummy_inputs() a_ : Dict = pipe(**SCREAMING_SNAKE_CASE__ ).images a_ : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) a_ : int = np.array( [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: a_ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) a_ : Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = self.get_dummy_inputs() a_ : List[str] = pipe(**SCREAMING_SNAKE_CASE__ ).images a_ : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) a_ : Union[str, Any] = np.array( [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @property def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: a_ : List[str] = ort.SessionOptions() a_ : int = False return options def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple: a_ : str = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) a_ : int = init_image.resize((1_2_8, 1_2_8) ) # using the PNDM scheduler by default a_ : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained( 'ssube/stable-diffusion-x4-upscaler-onnx' , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Tuple = 'A fantasy landscape, trending on artstation' a_ : str = torch.manual_seed(0 ) a_ : List[str] = pipe( prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , num_inference_steps=1_0 , generator=SCREAMING_SNAKE_CASE__ , output_type='np' , ) a_ : Dict = output.images a_ : Any = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) a_ : str = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]: a_ : Dict = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) a_ : List[str] = init_image.resize((1_2_8, 1_2_8) ) a_ : Dict = LMSDiscreteScheduler.from_pretrained( 'ssube/stable-diffusion-x4-upscaler-onnx' , subfolder='scheduler' ) a_ : Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained( 'ssube/stable-diffusion-x4-upscaler-onnx' , scheduler=SCREAMING_SNAKE_CASE__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Any = 'A fantasy landscape, trending on artstation' a_ : Tuple = torch.manual_seed(0 ) a_ : Optional[Any] = pipe( prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , 
guidance_scale=7.5 , num_inference_steps=2_0 , generator=SCREAMING_SNAKE_CASE__ , output_type='np' , ) a_ : str = output.images a_ : List[Any] = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) a_ : Tuple = np.array( [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
style_context_codestyle: 32
label: 1
def binary_insertion_sort(collection: list) -> list:
    """Sort a list in place using binary insertion sort and return it."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # Binary search for the insertion position of val within collection[:i]
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift larger elements one slot to the right and insert val
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
code_codestyle: 32
import inspect import re from hashlib import shaaaa from typing import Dict, List from .arrow import arrow from .audiofolder import audiofolder from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .sql import sql # noqa F401 from .text import text def SCREAMING_SNAKE_CASE_ ( __A : List[str] ) -> str: """simple docstring""" a_ : Tuple = [] for line in lines: a_ : Any = re.sub(R'#.*' , '' , __A ) # remove comments if line: filtered_lines.append(__A ) a_ : Tuple = '\n'.join(__A ) # Make a hash from all this code a_ : Tuple = full_str.encode('utf-8' ) return shaaaa(__A ).hexdigest() # get importable module names and hash for caching UpperCAmelCase_ : List[Any] = { 'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), 'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), 'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), 'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), 'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), 'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), 'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), 'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), } # Used to infer the module to use based on the data files extensions UpperCAmelCase_ : Dict = { '.csv': ('csv', {}), '.tsv': ('csv', {'sep': '\t'}), '.json': ('json', {}), '.jsonl': ('json', {}), '.parquet': ('parquet', {}), '.arrow': ('arrow', {}), '.txt': ('text', {}), } _EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) UpperCAmelCase_ : Optional[int] = {'imagefolder', 'audiofolder'} # Used to filter data files based on extensions given a module name UpperCAmelCase_ : Dict[str, List[str]] = {} for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) _MODULE_TO_EXTENSIONS["imagefolder"].append('.zip') _MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
style_context_codestyle: 32
label: 1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_tf_available, is_torch_available, ) UpperCAmelCase_ : Any = { 'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'], 'processing_speech_to_text': ['Speech2TextProcessor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Optional[Any] = ['Speech2TextTokenizer'] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Optional[int] = ['Speech2TextFeatureExtractor'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Union[str, Any] = [ 'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFSpeech2TextForConditionalGeneration', 'TFSpeech2TextModel', 'TFSpeech2TextPreTrainedModel', ] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Union[str, Any] = [ 'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'Speech2TextForConditionalGeneration', 'Speech2TextModel', 'Speech2TextPreTrainedModel', ] if TYPE_CHECKING: from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig from .processing_speech_to_text import SpeechaTextProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speech_to_text import SpeechaTextTokenizer try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_speech_to_text import ( TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, TFSpeechaTextForConditionalGeneration, TFSpeechaTextModel, TFSpeechaTextPreTrainedModel, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_to_text import ( SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechaTextForConditionalGeneration, SpeechaTextModel, SpeechaTextPreTrainedModel, ) else: import sys UpperCAmelCase_ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
code_codestyle: 32
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase_ : str = logging.get_logger(__name__) UpperCAmelCase_ : List[str] = { 'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json', 'YituTech/conv-bert-medium-small': ( 'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json' ), 'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json', # See all ConvBERT models at https://huggingface.co/models?filter=convbert } class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Optional[int] = '''convbert''' def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int]=3_0_5_2_2 , SCREAMING_SNAKE_CASE__ : Dict=7_6_8 , SCREAMING_SNAKE_CASE__ : Optional[int]=1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE__ : str=3_0_7_2 , SCREAMING_SNAKE_CASE__ : Dict="gelu" , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=5_1_2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=2 , SCREAMING_SNAKE_CASE__ : List[Any]=0.02 , SCREAMING_SNAKE_CASE__ : Any=1E-12 , SCREAMING_SNAKE_CASE__ : int=1 , SCREAMING_SNAKE_CASE__ : int=0 , SCREAMING_SNAKE_CASE__ : Optional[int]=2 , SCREAMING_SNAKE_CASE__ : Optional[int]=7_6_8 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : List[Any]=9 , SCREAMING_SNAKE_CASE__ : List[Any]=1 , SCREAMING_SNAKE_CASE__ : Dict=None , **SCREAMING_SNAKE_CASE__ : List[Any] , ) -> Any: super().__init__( pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) a_ : Tuple = vocab_size a_ : List[str] = hidden_size a_ : List[str] = num_hidden_layers a_ : Dict = num_attention_heads a_ : Optional[int] = intermediate_size a_ : int = hidden_act a_ : Dict = hidden_dropout_prob a_ : int = attention_probs_dropout_prob a_ : str = max_position_embeddings a_ : List[str] = type_vocab_size a_ : List[str] = initializer_range a_ : Tuple = layer_norm_eps a_ : Optional[int] = embedding_size a_ : List[Any] = head_ratio a_ : List[Any] = conv_kernel_size a_ : Tuple = num_groups a_ : Tuple = classifier_dropout class SCREAMING_SNAKE_CASE__ ( lowercase__ ): @property def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": a_ : Tuple = {0: 'batch', 1: 'choice', 2: 'sequence'} else: a_ : List[str] = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ] )
style_context_codestyle: 32
label: 1
def SCREAMING_SNAKE_CASE_ ( __A : list[list] ) -> list[list]: """simple docstring""" a_ : List[str] = current_set.copy() for row_index, row in enumerate(__A ): a_ : List[str] = row[0] for column_index, column in enumerate(__A ): if magnitude == 0: a_ : Any = column continue a_ : List[str] = column / magnitude # Subtract to cancel term a_ : Any = current_set[0] a_ : Optional[int] = [first_row] a_ : Dict = current_set[1::] for row in current_set: a_ : str = [] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(__A ) continue for column_index in range(len(__A ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(__A ) # Create next recursion iteration set if len(final_set[0] ) != 3: a_ : Dict = final_set[0] a_ : Union[str, Any] = [] a_ : Optional[int] = [] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) a_ : List[Any] = simplify(__A ) for i in range(len(__A ) ): resultant[i].insert(0 , current_first_column[i] ) resultant.insert(0 , __A ) a_ : Any = resultant return final_set def SCREAMING_SNAKE_CASE_ ( __A : list[list] ) -> list: """simple docstring""" if len(__A ) == 0: raise IndexError('solve_simultaneous() requires n lists of length n+1' ) a_ : List[Any] = len(__A ) + 1 if any(len(__A ) != _length for item in equations ): raise IndexError('solve_simultaneous() requires n lists of length n+1' ) for row in equations: if any(not isinstance(__A , (int, float) ) for column in row ): raise ValueError('solve_simultaneous() requires lists of integers' ) if len(__A ) == 1: return [equations[0][-1] / equations[0][0]] a_ : Union[str, Any] = equations.copy() if any(0 in row for row in data_set ): a_ : Any = data_set.copy() a_ : Tuple = [] for row_index, row in enumerate(__A ): if 0 not in row: a_ : Any = data_set.pop(__A ) break if not full_row: raise ValueError('solve_simultaneous() requires at least 1 full equation' ) data_set.insert(0 , __A ) a_ : List[Any] = data_set.copy() a_ : Optional[Any] = simplify(__A ) a_ : Union[str, Any] = simplified[::-1] a_ : list = [] for row in simplified: a_ : Tuple = row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue a_ : int = row.copy()[: len(__A ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(__A ) == 0: solutions.append(0 ) continue a_ : List[Any] = temp_row[1::] a_ : Optional[int] = temp_row[::-1] for column_index, column in enumerate(__A ): current_solution -= column * solutions[column_index] solutions.append(__A ) a_ : Tuple = [] for item in solutions: final.append(float(round(__A , 5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase_ : Any = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
code_codestyle: 32
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE__ : def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str=1_3 , SCREAMING_SNAKE_CASE__ : Optional[int]=7 , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : str=9_9 , SCREAMING_SNAKE_CASE__ : str=2_4 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=6 , SCREAMING_SNAKE_CASE__ : Optional[int]=3_7 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=5_1_2 , SCREAMING_SNAKE_CASE__ : List[str]=1_6 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : int=0.02 , SCREAMING_SNAKE_CASE__ : Optional[Any]=3 , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Tuple=1_0_0_0 , ) -> str: a_ : Optional[Any] = parent a_ : List[str] = batch_size a_ : List[str] = seq_length a_ : str = is_training a_ : str = use_input_mask a_ : int = use_token_type_ids a_ : List[str] = use_labels a_ : Optional[int] = vocab_size a_ : Any = hidden_size a_ : int = num_hidden_layers a_ : List[str] = num_attention_heads a_ : str = intermediate_size a_ : Union[str, Any] = hidden_act a_ : List[str] = hidden_dropout_prob a_ : int = attention_probs_dropout_prob a_ : int = max_position_embeddings a_ : Tuple = type_vocab_size a_ : Optional[Any] = type_sequence_label_size a_ : Tuple = initializer_range a_ : Dict = num_labels a_ : str = scope a_ : Optional[int] = range_bbox def SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: a_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a_ : Any = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: a_ : int = bbox[i, j, 3] a_ : str = bbox[i, j, 1] a_ : List[str] = t if bbox[i, j, 2] < bbox[i, j, 0]: a_ : Tuple = bbox[i, j, 2] a_ : List[str] = bbox[i, j, 0] a_ : Union[str, Any] = t a_ : List[Any] = None if self.use_input_mask: a_ : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) a_ : List[Any] = None if self.use_token_type_ids: a_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a_ : int = None a_ : Tuple = None if self.use_labels: a_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a_ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a_ : Optional[int] = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def SCREAMING_SNAKE_CASE ( self : Dict ) -> int: return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> str: a_ : Any = LiltModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : Any = model(SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = model(SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = model(SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> int: a_ : Any = self.num_labels a_ : str = LiltForTokenClassification(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : str = model( SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> str: a_ : Union[str, Any] = LiltForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : List[str] = model( SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , start_positions=SCREAMING_SNAKE_CASE__ , end_positions=SCREAMING_SNAKE_CASE__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: a_ : int = self.prepare_config_and_inputs() ( ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ) : List[Any] = config_and_inputs a_ : Optional[int] = { 'input_ids': input_ids, 'bbox': bbox, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ): snake_case__ : Union[str, Any] = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) snake_case__ : str = ( 
{ '''feature-extraction''': LiltModel, '''question-answering''': LiltForQuestionAnswering, '''text-classification''': LiltForSequenceClassification, '''token-classification''': LiltForTokenClassification, '''zero-shot''': LiltForSequenceClassification, } if is_torch_available() else {} ) snake_case__ : List[str] = False snake_case__ : str = False def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int ) -> int: return True def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: a_ : str = LiltModelTester(self ) a_ : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=3_7 ) def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]: self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: a_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: a_ : Tuple = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: a_ : List[str] = type self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: a_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]: a_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE__ ) @slow def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a_ : List[Any] = LiltModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) @require_torch @slow class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]: a_ : List[str] = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(SCREAMING_SNAKE_CASE__ ) a_ : str = torch.tensor([[1, 2]] , device=SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=SCREAMING_SNAKE_CASE__ ) # forward pass with torch.no_grad(): a_ : str = model(input_ids=SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = torch.Size([1, 2, 7_6_8] ) a_ : int = torch.tensor( [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=SCREAMING_SNAKE_CASE__ , ) self.assertTrue(outputs.last_hidden_state.shape , SCREAMING_SNAKE_CASE__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) )
style_context_codestyle: 32
label: 1
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedy change-making: return the denominations used, largest first."""
    total_value = int(value)
    answer = []
    # Traverse denominations from largest to smallest
    for denomination in reversed(denominations):
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)
    return answer


# Driver code
if __name__ == "__main__":
    denominations = []
    value = "0"
    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())
        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if the user does not enter any
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        for i in range(len(answer)):
            print(answer[i], end=" ")
code_codestyle: 32
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class SCREAMING_SNAKE_CASE__ : def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple=1_3 , SCREAMING_SNAKE_CASE__ : str=7 , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=9_9 , SCREAMING_SNAKE_CASE__ : Optional[Any]=3_2 , SCREAMING_SNAKE_CASE__ : List[str]=5 , SCREAMING_SNAKE_CASE__ : List[Any]=4 , SCREAMING_SNAKE_CASE__ : Tuple=3_7 , SCREAMING_SNAKE_CASE__ : Any="gelu" , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : int=5_1_2 , SCREAMING_SNAKE_CASE__ : int=1_6 , SCREAMING_SNAKE_CASE__ : Optional[int]=2 , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE__ : Optional[Any]=4 , SCREAMING_SNAKE_CASE__ : Optional[int]=None , ) -> Any: a_ : Tuple = parent a_ : int = batch_size a_ : Tuple = seq_length a_ : List[Any] = is_training a_ : List[str] = use_token_type_ids a_ : Dict = use_labels a_ : Any = vocab_size a_ : List[str] = hidden_size a_ : Tuple = num_hidden_layers a_ : List[Any] = num_attention_heads a_ : Dict = intermediate_size a_ : Any = hidden_act a_ : List[str] = hidden_dropout_prob a_ : Tuple = attention_probs_dropout_prob a_ : Optional[Any] = max_position_embeddings a_ : List[Any] = type_vocab_size a_ : int = type_sequence_label_size a_ : List[Any] = initializer_range a_ : List[str] = num_labels a_ : Union[str, Any] = num_choices a_ : str = scope a_ : Tuple = self.vocab_size - 1 def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: a_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a_ : Any = None if self.use_token_type_ids: a_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a_ : List[Any] = None a_ : Union[str, Any] = None a_ : List[Any] = None if self.use_labels: a_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a_ : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) a_ : Union[str, Any] = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) a_ : List[str] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , *SCREAMING_SNAKE_CASE__ : Tuple ) -> Union[str, Any]: a_ : Dict = OpenAIGPTModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : str = 
model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , head_mask=SCREAMING_SNAKE_CASE__ ) a_ : Dict = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ ) a_ : Dict = model(SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Any: a_ : str = OpenAIGPTLMHeadModel(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : Optional[int] = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict: a_ : int = OpenAIGPTDoubleHeadsModel(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : str = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : str ) -> List[str]: a_ : Any = self.num_labels a_ : Dict = OpenAIGPTForSequenceClassification(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a_ : Any = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: a_ : Optional[Any] = self.prepare_config_and_inputs() ( ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ) : Optional[Any] = config_and_inputs a_ : Optional[int] = { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask, } return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ): snake_case__ : Tuple = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) snake_case__ : List[str] = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly snake_case__ : Dict = ( { '''feature-extraction''': OpenAIGPTModel, '''text-classification''': OpenAIGPTForSequenceClassification, '''text-generation''': OpenAIGPTLMHeadModel, '''zero-shot''': OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] ) -> Dict: if 
pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any=False ) -> List[str]: a_ : str = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": a_ : Optional[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ , ) a_ : str = inputs_dict['labels'] a_ : Optional[int] = inputs_dict['labels'] a_ : Optional[int] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ , ) a_ : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ ) return inputs_dict def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: a_ : str = OpenAIGPTModelTester(self ) a_ : int = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , n_embd=3_7 ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: a_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: a_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]: a_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: a_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*SCREAMING_SNAKE_CASE__ ) @slow def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str: for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a_ : str = OpenAIGPTModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE ( self : Dict ) -> int: a_ : Dict = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' ) model.to(SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ ) # the president is a_ : Tuple = [ 4_8_1, 4_7_3_5, 5_4_4, 2_4_6, 9_6_3, 8_7_0, 7_6_2, 2_3_9, 2_4_4, 4_0_4_7_7, 2_4_4, 2_4_9, 7_1_9, 8_8_1, 4_8_7, 5_4_4, 2_4_0, 2_4_4, 6_0_3, 4_8_1, ] # the president is a very good man. " \n " i\'m sure he is, " said the a_ : Dict = model.generate(SCREAMING_SNAKE_CASE__ , do_sample=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(output_ids[0].tolist() , SCREAMING_SNAKE_CASE__ )
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Interleave the characters of two strings, appending the leftover tail of the longer one."""
    first_str_length: int = len(first_str)
    second_str_length: int = len(second_str)
    abs_length: int = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list: list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)


if __name__ == "__main__":
    print(alternative_string_arrange('AB', 'XYZ'), end=' ')
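# --- Expected behaviour (illustration only; not part of the original file) ---
#   alternative_string_arrange('AB', 'XYZ')  -> 'AXBYZ'   (interleave, then append the leftover 'Z')
#   alternative_string_arrange('ABCD', 'XY') -> 'AXBYCD'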
import copy from typing import Dict, List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING UpperCAmelCase_ : Optional[int] = { 'facebook/mask2former-swin-small-coco-instance': ( 'https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json' ) # See all Mask2Former models at https://huggingface.co/models?filter=mask2former } UpperCAmelCase_ : List[str] = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Any = '''mask2former''' snake_case__ : Any = ['''swin'''] snake_case__ : str = {'''hidden_size''': '''hidden_dim'''} def __init__( self : Any , SCREAMING_SNAKE_CASE__ : Optional[Dict] = None , SCREAMING_SNAKE_CASE__ : int = 2_5_6 , SCREAMING_SNAKE_CASE__ : int = 2_5_6 , SCREAMING_SNAKE_CASE__ : int = 2_5_6 , SCREAMING_SNAKE_CASE__ : int = 1_0_2_4 , SCREAMING_SNAKE_CASE__ : str = "relu" , SCREAMING_SNAKE_CASE__ : int = 6 , SCREAMING_SNAKE_CASE__ : int = 1_0 , SCREAMING_SNAKE_CASE__ : int = 8 , SCREAMING_SNAKE_CASE__ : float = 0.0 , SCREAMING_SNAKE_CASE__ : int = 2_0_4_8 , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : int = 4 , SCREAMING_SNAKE_CASE__ : int = 2_5_5 , SCREAMING_SNAKE_CASE__ : int = 1_0_0 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 2.0 , SCREAMING_SNAKE_CASE__ : float = 5.0 , SCREAMING_SNAKE_CASE__ : float = 5.0 , SCREAMING_SNAKE_CASE__ : int = 1_2_5_4_4 , SCREAMING_SNAKE_CASE__ : float = 3.0 , SCREAMING_SNAKE_CASE__ : float = 0.75 , SCREAMING_SNAKE_CASE__ : float = 0.02 , SCREAMING_SNAKE_CASE__ : float = 1.0 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : List[int] = [4, 8, 1_6, 3_2] , SCREAMING_SNAKE_CASE__ : bool = None , **SCREAMING_SNAKE_CASE__ : int , ) -> List[Any]: if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' ) a_ : Dict = CONFIG_MAPPING['swin']( image_size=2_2_4 , in_channels=3 , patch_size=4 , embed_dim=9_6 , depths=[2, 2, 1_8, 2] , num_heads=[3, 6, 1_2, 2_4] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=SCREAMING_SNAKE_CASE__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): a_ : Any = backbone_config.pop('model_type' ) a_ : Optional[Any] = CONFIG_MAPPING[backbone_model_type] a_ : List[str] = config_class.from_dict(SCREAMING_SNAKE_CASE__ ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( F"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. 
""" F"""Supported model types: {",".join(self.backbones_supported )}""" ) a_ : Dict = backbone_config a_ : List[str] = feature_size a_ : List[str] = mask_feature_size a_ : int = hidden_dim a_ : Dict = encoder_feedforward_dim a_ : str = activation_function a_ : List[str] = encoder_layers a_ : List[str] = decoder_layers a_ : Dict = num_attention_heads a_ : str = dropout a_ : Tuple = dim_feedforward a_ : List[str] = pre_norm a_ : Optional[int] = enforce_input_projection a_ : Any = common_stride a_ : Optional[int] = ignore_value a_ : int = num_queries a_ : Tuple = no_object_weight a_ : Dict = class_weight a_ : Optional[int] = mask_weight a_ : Optional[int] = dice_weight a_ : str = train_num_points a_ : List[str] = oversample_ratio a_ : List[Any] = importance_sample_ratio a_ : Any = init_std a_ : Union[str, Any] = init_xavier_std a_ : Union[str, Any] = use_auxiliary_loss a_ : Dict = feature_strides a_ : List[str] = output_auxiliary_logits a_ : Dict = decoder_layers super().__init__(**SCREAMING_SNAKE_CASE__ ) @classmethod def SCREAMING_SNAKE_CASE ( cls : str , SCREAMING_SNAKE_CASE__ : PretrainedConfig , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> List[Any]: return cls( backbone_config=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict[str, any]: a_ : Optional[int] = copy.deepcopy(self.__dict__ ) a_ : List[Any] = self.backbone_config.to_dict() a_ : Optional[Any] = self.__class__.model_type return output
import numpy as np


def tangent_hyperbolic(vector: np.array) -> np.array:
    """Apply the hyperbolic tangent element-wise: tanh(x) = (2 / (1 + e^(-2x))) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
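# --- Usage sketch (illustration only; not part of the original file) ---
# The closed form above is algebraically identical to numpy's built-in tanh:
#   tangent_hyperbolic(np.array([-1.0, 0.0, 1.0]))
#   -> array([-0.76159416,  0.        ,  0.76159416])   # same as np.tanh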
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ : Dict = logging.get_logger(__name__) UpperCAmelCase_ : Union[str, Any] = { 'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json', } class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : List[str] = '''switch_transformers''' snake_case__ : Optional[int] = ['''past_key_values'''] snake_case__ : Optional[Any] = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[int]=3_2_1_2_8 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=7_6_8 , SCREAMING_SNAKE_CASE__ : Optional[Any]=6_4 , SCREAMING_SNAKE_CASE__ : List[str]=2_0_4_8 , SCREAMING_SNAKE_CASE__ : Dict=6_4 , SCREAMING_SNAKE_CASE__ : List[Any]=1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE__ : str=3 , SCREAMING_SNAKE_CASE__ : Tuple=1_2 , SCREAMING_SNAKE_CASE__ : Tuple=8 , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.01 , SCREAMING_SNAKE_CASE__ : str="float32" , SCREAMING_SNAKE_CASE__ : str=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3_2 , SCREAMING_SNAKE_CASE__ : Dict=1_2_8 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Dict=1E-6 , SCREAMING_SNAKE_CASE__ : Dict=0.001 , SCREAMING_SNAKE_CASE__ : Any=0.001 , SCREAMING_SNAKE_CASE__ : Optional[int]=1.0 , SCREAMING_SNAKE_CASE__ : Any="relu" , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : Optional[int]=1 , **SCREAMING_SNAKE_CASE__ : Dict , ) -> Optional[Any]: a_ : Optional[int] = vocab_size a_ : List[str] = d_model a_ : Tuple = d_kv a_ : Optional[Any] = d_ff a_ : List[Any] = num_sparse_encoder_layers a_ : Any = num_layers a_ : str = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry a_ : List[Any] = num_sparse_decoder_layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_encoder_layers > 0: a_ : Optional[int] = self.num_layers // self.num_sparse_encoder_layers else: a_ : List[Any] = self.num_layers # HACK: this will create 0 sparse layers # This tells us, each how many encoder layer we'll have to set a sparse layer. 
if self.num_sparse_decoder_layers > 0: a_ : Union[str, Any] = self.num_decoder_layers // self.num_sparse_decoder_layers else: a_ : List[str] = self.num_decoder_layers # HACK: this will create 0 sparse layers a_ : Dict = num_heads a_ : str = num_experts a_ : Any = expert_capacity a_ : List[Any] = router_bias a_ : str = router_jitter_noise if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(F"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" ) a_ : Optional[int] = router_dtype a_ : int = router_ignore_padding_tokens a_ : Any = relative_attention_num_buckets a_ : List[str] = relative_attention_max_distance a_ : Optional[Any] = dropout_rate a_ : Tuple = layer_norm_epsilon a_ : Dict = initializer_factor a_ : Any = feed_forward_proj a_ : Tuple = use_cache a_ : str = add_router_probs a_ : Optional[int] = router_z_loss_coef a_ : List[str] = router_aux_loss_coef a_ : int = self.feed_forward_proj.split('-' ) a_ : int = act_info[-1] a_ : Optional[int] = act_info[0] == 'gated' if len(SCREAMING_SNAKE_CASE__ ) > 1 and act_info[0] != "gated" or len(SCREAMING_SNAKE_CASE__ ) > 2: raise ValueError( F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ' '\'gated-gelu\' or \'relu\'' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": a_ : Any = 'gelu_new' super().__init__( pad_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
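# --- Usage sketch (added for illustration; not part of the file above) ---
# A minimal example of the sparse-layer bookkeeping above, assuming the public
# `SwitchTransformersConfig` class exported by `transformers`; the derived attribute is
# assumed to be named `encoder_sparse_step`. With 12 encoder layers and 3 sparse layers,
# every 4th encoder layer becomes a mixture-of-experts layer.
from transformers import SwitchTransformersConfig

config = SwitchTransformersConfig(num_layers=12, num_sparse_encoder_layers=3)
print(config.encoder_sparse_step)  # 12 // 3 == 4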
import argparse import fairseq import torch from torch import nn from transformers import ( MBartaaTokenizer, MBartConfig, MBartForCausalLM, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() UpperCAmelCase_ : Dict = logging.get_logger(__name__) UpperCAmelCase_ : int = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } UpperCAmelCase_ : Union[str, Any] = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def SCREAMING_SNAKE_CASE_ ( __A : str , __A : Optional[Any] , __A : Optional[Any] , __A : int , __A : Dict ) -> List[Any]: """simple docstring""" for attribute in key.split('.' ): a_ : Optional[Any] = getattr(__A , __A ) if weight_type is not None: a_ : Optional[int] = getattr(__A , __A ).shape else: a_ : Any = hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": a_ : str = value elif weight_type == "weight_g": a_ : Tuple = value elif weight_type == "weight_v": a_ : Optional[int] = value elif weight_type == "bias": a_ : Union[str, Any] = value else: a_ : Tuple = value logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def SCREAMING_SNAKE_CASE_ ( __A : List[str] , __A : List[Any] ) -> Any: """simple docstring""" a_ : Any = [] a_ : Dict = fairseq_model.state_dict() a_ : Optional[int] = hf_model.feature_extractor a_ : str = hf_model.adapter for name, value in fairseq_dict.items(): a_ : str = False if "conv_layers" in name: load_conv_layer( __A , __A , __A , __A , hf_model.config.feat_extract_norm == 'group' , ) a_ : Dict = True elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.'] ): load_adapter(__A , __A , __A , __A ) a_ : Tuple = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: a_ : Union[str, Any] = True if "*" in mapped_key: a_ : List[Any] = name.split(__A )[0].split('.' 
)[-2] a_ : str = mapped_key.replace('*' , __A ) if "weight_g" in name: a_ : Dict = 'weight_g' elif "weight_v" in name: a_ : Optional[Any] = 'weight_v' elif "bias" in name: a_ : Optional[int] = 'bias' elif "weight" in name: a_ : Dict = 'weight' else: a_ : Any = None set_recursively(__A , __A , __A , __A , __A ) continue if not is_used: unused_weights.append(__A ) logger.warning(F"""Unused weights: {unused_weights}""" ) def SCREAMING_SNAKE_CASE_ ( __A : List[Any] , __A : Dict , __A : int , __A : int , __A : List[str] ) -> List[Any]: """simple docstring""" a_ : int = full_name.split('conv_layers.' )[-1] a_ : Dict = name.split('.' ) a_ : str = int(items[0] ) a_ : Optional[Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) a_ : Optional[Any] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) a_ : Any = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." ) a_ : str = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) a_ : str = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(__A ) def SCREAMING_SNAKE_CASE_ ( __A : Any , __A : int , __A : Optional[int] , __A : Optional[int] ) -> List[Any]: """simple docstring""" a_ : List[Any] = full_name.split('adaptor.' )[-1] a_ : str = name.split('.' 
) if items[1].isdigit(): a_ : Dict = int(items[1] ) else: a_ : int = None if "adaptor" not in full_name: if "proj_ln" in full_name: # has to be layer norm if "bias" in name: assert ( value.shape == adapter.proj_layer_norm.bias.data.shape ), F"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.""" a_ : int = value logger.info(F"""Adapter proj layer norm bias was initialized from {full_name}.""" ) if "weight" in name: assert ( value.shape == adapter.proj_layer_norm.weight.data.shape ), F"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.""" a_ : Union[str, Any] = value else: # has to be projection layer if "bias" in name: assert ( value.shape == adapter.proj.bias.data.shape ), F"""{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.""" a_ : List[str] = value logger.info(F"""Adapter proj layer bias was initialized from {full_name}.""" ) if "weight" in name: assert ( value.shape == adapter.proj.weight.data.shape ), F"""{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.""" a_ : str = value logger.info(F"""Adapter proj layer weight was initialized from {full_name}.""" ) elif isinstance(__A , __A ): if "bias" in name: assert ( value.shape == adapter.layers[layer_id].conv.bias.data.shape ), F"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.""" a_ : Optional[Any] = value logger.info(F"""Adapter layer {layer_id} bias was initialized from {full_name}.""" ) elif "weight" in name: assert ( value.shape == adapter.layers[layer_id].conv.weight.data.shape ), F"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.""" a_ : List[Any] = value logger.info(F"""Adapter layer {layer_id} bias was initialized from {full_name}.""" ) else: unused_weights.append(__A ) def SCREAMING_SNAKE_CASE_ ( __A : List[Any] ) -> List[Any]: """simple docstring""" a_ , a_ : str = emb.weight.shape a_ : Optional[int] = nn.Linear(__A , __A , bias=__A ) a_ : Optional[int] = emb.weight.data return lin_layer @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( __A : Dict , __A : Optional[int] , __A : Dict , __A : Optional[Any] , __A : str , __A : List[Any] , __A : Tuple , __A : Dict , __A : Optional[int] , __A : str , __A : str , ) -> int: """simple docstring""" a_ : Any = WavaVecaConfig.from_pretrained( __A , add_adapter=__A , adapter_stride=__A , adapter_kernel_size=__A , use_auth_token=__A , output_hidden_size=__A , ) a_ : Union[str, Any] = MBartConfig.from_pretrained(__A ) # load model a_ , a_ , a_ : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={ 'config_yaml': config_yaml_path, 'data': '/'.join(dict_path.split('/' )[:-1] ), 'w2v_path': checkpoint_path, 'load_pretrained_decoder_from': None, } , ) a_ : int = model[0].eval() # load feature extractor a_ : List[Any] = WavaVecaFeatureExtractor.from_pretrained(__A , use_auth_token=__A ) # set weights for wav2vec2 encoder a_ : int = WavaVecaModel(__A ) recursively_load_weights_wavaveca(model.encoder , __A ) # load decoder weights a_ : Tuple = MBartForCausalLM(__A ) a_ , a_ : Optional[int] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__A ) logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" ) logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" ) a_ : 
Union[str, Any] = SpeechEncoderDecoderModel(encoder=__A , decoder=__A ) a_ : List[str] = False a_ : List[Any] = MBartaaTokenizer(__A ) tokenizer.save_pretrained(__A ) a_ : Optional[Any] = hf_wavavec.config.to_dict() a_ : Dict = tokenizer.pad_token_id a_ : int = tokenizer.bos_token_id a_ : Tuple = tokenizer.eos_token_id a_ : str = 'mbart50' a_ : Union[str, Any] = 'wav2vec2' a_ : Union[str, Any] = tokenizer.eos_token_id a_ : Optional[Any] = 25_00_04 a_ : List[Any] = tokenizer.eos_token_id a_ : Optional[int] = SpeechEncoderDecoderConfig.from_dict(__A ) hf_wavavec.save_pretrained(__A ) feature_extractor.save_pretrained(__A ) if __name__ == "__main__": UpperCAmelCase_ : Any = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_yaml_path', default=None, type=str, help='Path to yaml file of fine-tuned model') parser.add_argument( '--encoder_config_path', default='facebook/wav2vec2-xls-r-1b', type=str, help='Path to hf encoder wav2vec2 checkpoint config', ) parser.add_argument( '--decoder_config_path', default='facebook/mbart-large-50-one-to-many-mmt', type=str, help='Path to hf decoder checkpoint config', ) parser.add_argument('--add_adapter', default=True, type=bool, help='whethere to add model adapter layers') parser.add_argument('--adapter_stride', default=2, type=int, help='stride of adapter layers') parser.add_argument('--adapter_kernel_size', default=3, type=int, help='kernel size of adapter layers') parser.add_argument('--encoder_output_dim', default=1024, type=int, help='encoder output dim') parser.add_argument('--start_token_id', default=25_0004, type=int, help='`decoder_start_token_id` of model config') UpperCAmelCase_ : str = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, args.config_yaml_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, add_adapter=args.add_adapter, adapter_kernel_size=args.adapter_kernel_size, adapter_stride=args.adapter_stride, decoder_start_token_id=args.start_token_id, encoder_output_dim=args.encoder_output_dim, )
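# --- Example invocation (illustration only; the script filename and all paths are hypothetical) ---
# The flags below are the ones registered with argparse at the bottom of this script.
#
#   python convert_wav2vec2_mbart50_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --dict_path /path/to/dict.txt \
#       --config_yaml_path /path/to/config.yaml \
#       --pytorch_dump_folder_path ./wav2vec2-mbart50-dump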
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool UpperCAmelCase_ : Tuple = { 'Acehnese Arabic': 'ace_Arab', 'Acehnese Latin': 'ace_Latn', 'Mesopotamian Arabic': 'acm_Arab', 'Ta\'izzi-Adeni Arabic': 'acq_Arab', 'Tunisian Arabic': 'aeb_Arab', 'Afrikaans': 'afr_Latn', 'South Levantine Arabic': 'ajp_Arab', 'Akan': 'aka_Latn', 'Amharic': 'amh_Ethi', 'North Levantine Arabic': 'apc_Arab', 'Modern Standard Arabic': 'arb_Arab', 'Modern Standard Arabic Romanized': 'arb_Latn', 'Najdi Arabic': 'ars_Arab', 'Moroccan Arabic': 'ary_Arab', 'Egyptian Arabic': 'arz_Arab', 'Assamese': 'asm_Beng', 'Asturian': 'ast_Latn', 'Awadhi': 'awa_Deva', 'Central Aymara': 'ayr_Latn', 'South Azerbaijani': 'azb_Arab', 'North Azerbaijani': 'azj_Latn', 'Bashkir': 'bak_Cyrl', 'Bambara': 'bam_Latn', 'Balinese': 'ban_Latn', 'Belarusian': 'bel_Cyrl', 'Bemba': 'bem_Latn', 'Bengali': 'ben_Beng', 'Bhojpuri': 'bho_Deva', 'Banjar Arabic': 'bjn_Arab', 'Banjar Latin': 'bjn_Latn', 'Standard Tibetan': 'bod_Tibt', 'Bosnian': 'bos_Latn', 'Buginese': 'bug_Latn', 'Bulgarian': 'bul_Cyrl', 'Catalan': 'cat_Latn', 'Cebuano': 'ceb_Latn', 'Czech': 'ces_Latn', 'Chokwe': 'cjk_Latn', 'Central Kurdish': 'ckb_Arab', 'Crimean Tatar': 'crh_Latn', 'Welsh': 'cym_Latn', 'Danish': 'dan_Latn', 'German': 'deu_Latn', 'Southwestern Dinka': 'dik_Latn', 'Dyula': 'dyu_Latn', 'Dzongkha': 'dzo_Tibt', 'Greek': 'ell_Grek', 'English': 'eng_Latn', 'Esperanto': 'epo_Latn', 'Estonian': 'est_Latn', 'Basque': 'eus_Latn', 'Ewe': 'ewe_Latn', 'Faroese': 'fao_Latn', 'Fijian': 'fij_Latn', 'Finnish': 'fin_Latn', 'Fon': 'fon_Latn', 'French': 'fra_Latn', 'Friulian': 'fur_Latn', 'Nigerian Fulfulde': 'fuv_Latn', 'Scottish Gaelic': 'gla_Latn', 'Irish': 'gle_Latn', 'Galician': 'glg_Latn', 'Guarani': 'grn_Latn', 'Gujarati': 'guj_Gujr', 'Haitian Creole': 'hat_Latn', 'Hausa': 'hau_Latn', 'Hebrew': 'heb_Hebr', 'Hindi': 'hin_Deva', 'Chhattisgarhi': 'hne_Deva', 'Croatian': 'hrv_Latn', 'Hungarian': 'hun_Latn', 'Armenian': 'hye_Armn', 'Igbo': 'ibo_Latn', 'Ilocano': 'ilo_Latn', 'Indonesian': 'ind_Latn', 'Icelandic': 'isl_Latn', 'Italian': 'ita_Latn', 'Javanese': 'jav_Latn', 'Japanese': 'jpn_Jpan', 'Kabyle': 'kab_Latn', 'Jingpho': 'kac_Latn', 'Kamba': 'kam_Latn', 'Kannada': 'kan_Knda', 'Kashmiri Arabic': 'kas_Arab', 'Kashmiri Devanagari': 'kas_Deva', 'Georgian': 'kat_Geor', 'Central Kanuri Arabic': 'knc_Arab', 'Central Kanuri Latin': 'knc_Latn', 'Kazakh': 'kaz_Cyrl', 'Kabiyè': 'kbp_Latn', 'Kabuverdianu': 'kea_Latn', 'Khmer': 'khm_Khmr', 'Kikuyu': 'kik_Latn', 'Kinyarwanda': 'kin_Latn', 'Kyrgyz': 'kir_Cyrl', 'Kimbundu': 'kmb_Latn', 'Northern Kurdish': 'kmr_Latn', 'Kikongo': 'kon_Latn', 'Korean': 'kor_Hang', 'Lao': 'lao_Laoo', 'Ligurian': 'lij_Latn', 'Limburgish': 'lim_Latn', 'Lingala': 'lin_Latn', 'Lithuanian': 'lit_Latn', 'Lombard': 'lmo_Latn', 'Latgalian': 'ltg_Latn', 'Luxembourgish': 'ltz_Latn', 'Luba-Kasai': 'lua_Latn', 'Ganda': 'lug_Latn', 'Luo': 
'luo_Latn', 'Mizo': 'lus_Latn', 'Standard Latvian': 'lvs_Latn', 'Magahi': 'mag_Deva', 'Maithili': 'mai_Deva', 'Malayalam': 'mal_Mlym', 'Marathi': 'mar_Deva', 'Minangkabau Arabic ': 'min_Arab', 'Minangkabau Latin': 'min_Latn', 'Macedonian': 'mkd_Cyrl', 'Plateau Malagasy': 'plt_Latn', 'Maltese': 'mlt_Latn', 'Meitei Bengali': 'mni_Beng', 'Halh Mongolian': 'khk_Cyrl', 'Mossi': 'mos_Latn', 'Maori': 'mri_Latn', 'Burmese': 'mya_Mymr', 'Dutch': 'nld_Latn', 'Norwegian Nynorsk': 'nno_Latn', 'Norwegian Bokmål': 'nob_Latn', 'Nepali': 'npi_Deva', 'Northern Sotho': 'nso_Latn', 'Nuer': 'nus_Latn', 'Nyanja': 'nya_Latn', 'Occitan': 'oci_Latn', 'West Central Oromo': 'gaz_Latn', 'Odia': 'ory_Orya', 'Pangasinan': 'pag_Latn', 'Eastern Panjabi': 'pan_Guru', 'Papiamento': 'pap_Latn', 'Western Persian': 'pes_Arab', 'Polish': 'pol_Latn', 'Portuguese': 'por_Latn', 'Dari': 'prs_Arab', 'Southern Pashto': 'pbt_Arab', 'Ayacucho Quechua': 'quy_Latn', 'Romanian': 'ron_Latn', 'Rundi': 'run_Latn', 'Russian': 'rus_Cyrl', 'Sango': 'sag_Latn', 'Sanskrit': 'san_Deva', 'Santali': 'sat_Olck', 'Sicilian': 'scn_Latn', 'Shan': 'shn_Mymr', 'Sinhala': 'sin_Sinh', 'Slovak': 'slk_Latn', 'Slovenian': 'slv_Latn', 'Samoan': 'smo_Latn', 'Shona': 'sna_Latn', 'Sindhi': 'snd_Arab', 'Somali': 'som_Latn', 'Southern Sotho': 'sot_Latn', 'Spanish': 'spa_Latn', 'Tosk Albanian': 'als_Latn', 'Sardinian': 'srd_Latn', 'Serbian': 'srp_Cyrl', 'Swati': 'ssw_Latn', 'Sundanese': 'sun_Latn', 'Swedish': 'swe_Latn', 'Swahili': 'swh_Latn', 'Silesian': 'szl_Latn', 'Tamil': 'tam_Taml', 'Tatar': 'tat_Cyrl', 'Telugu': 'tel_Telu', 'Tajik': 'tgk_Cyrl', 'Tagalog': 'tgl_Latn', 'Thai': 'tha_Thai', 'Tigrinya': 'tir_Ethi', 'Tamasheq Latin': 'taq_Latn', 'Tamasheq Tifinagh': 'taq_Tfng', 'Tok Pisin': 'tpi_Latn', 'Tswana': 'tsn_Latn', 'Tsonga': 'tso_Latn', 'Turkmen': 'tuk_Latn', 'Tumbuka': 'tum_Latn', 'Turkish': 'tur_Latn', 'Twi': 'twi_Latn', 'Central Atlas Tamazight': 'tzm_Tfng', 'Uyghur': 'uig_Arab', 'Ukrainian': 'ukr_Cyrl', 'Umbundu': 'umb_Latn', 'Urdu': 'urd_Arab', 'Northern Uzbek': 'uzn_Latn', 'Venetian': 'vec_Latn', 'Vietnamese': 'vie_Latn', 'Waray': 'war_Latn', 'Wolof': 'wol_Latn', 'Xhosa': 'xho_Latn', 'Eastern Yiddish': 'ydd_Hebr', 'Yoruba': 'yor_Latn', 'Yue Chinese': 'yue_Hant', 'Chinese Simplified': 'zho_Hans', 'Chinese Traditional': 'zho_Hant', 'Standard Malay': 'zsm_Latn', 'Zulu': 'zul_Latn', } class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : str = '''facebook/nllb-200-distilled-600M''' snake_case__ : Union[str, Any] = ( '''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should ''' '''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, ''' '''which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in ''' '''plain English, such as \'Romanian\', or \'Albanian\'. 
It returns the text translated in `tgt_lang`.''' ) snake_case__ : Optional[Any] = '''translator''' snake_case__ : Tuple = AutoTokenizer snake_case__ : Union[str, Any] = AutoModelForSeqaSeqLM snake_case__ : Dict = LANGUAGE_CODES snake_case__ : str = ['''text''', '''text''', '''text'''] snake_case__ : Tuple = ['''text'''] def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Tuple: if src_lang not in self.lang_to_code: raise ValueError(F"""{src_lang} is not a supported language.""" ) if tgt_lang not in self.lang_to_code: raise ValueError(F"""{tgt_lang} is not a supported language.""" ) a_ : str = self.lang_to_code[src_lang] a_ : Any = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( SCREAMING_SNAKE_CASE__ , return_tensors='pt' , src_lang=SCREAMING_SNAKE_CASE__ , tgt_lang=SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : Tuple ) -> Any: return self.model.generate(**SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[str] ) -> Dict: return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
import argparse from pathlib import Path import requests import torch from PIL import Image from transformers import ( RobertaTokenizer, TrOCRConfig, TrOCRForCausalLM, TrOCRProcessor, VisionEncoderDecoderModel, ViTConfig, ViTImageProcessor, ViTModel, ) from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE_ ( __A : Dict , __A : List[Any] ) -> int: """simple docstring""" a_ : Dict = [] for i in range(encoder_config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F"""encoder.deit.blocks.{i}.norm1.weight""", F"""encoder.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((F"""encoder.deit.blocks.{i}.norm1.bias""", F"""encoder.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append( (F"""encoder.deit.blocks.{i}.attn.proj.weight""", F"""encoder.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append( (F"""encoder.deit.blocks.{i}.attn.proj.bias""", F"""encoder.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append( (F"""encoder.deit.blocks.{i}.norm2.weight""", F"""encoder.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((F"""encoder.deit.blocks.{i}.norm2.bias""", F"""encoder.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append( (F"""encoder.deit.blocks.{i}.mlp.fc1.weight""", F"""encoder.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append( (F"""encoder.deit.blocks.{i}.mlp.fc1.bias""", F"""encoder.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append( (F"""encoder.deit.blocks.{i}.mlp.fc2.weight""", F"""encoder.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((F"""encoder.deit.blocks.{i}.mlp.fc2.bias""", F"""encoder.encoder.layer.{i}.output.dense.bias""") ) # cls token, position embeddings and patch embeddings of encoder rename_keys.extend( [ ('encoder.deit.cls_token', 'encoder.embeddings.cls_token'), ('encoder.deit.pos_embed', 'encoder.embeddings.position_embeddings'), ('encoder.deit.patch_embed.proj.weight', 'encoder.embeddings.patch_embeddings.projection.weight'), ('encoder.deit.patch_embed.proj.bias', 'encoder.embeddings.patch_embeddings.projection.bias'), ('encoder.deit.norm.weight', 'encoder.layernorm.weight'), ('encoder.deit.norm.bias', 'encoder.layernorm.bias'), ] ) return rename_keys def SCREAMING_SNAKE_CASE_ ( __A : Tuple , __A : str ) -> Tuple: """simple docstring""" for i in range(encoder_config.num_hidden_layers ): # queries, keys and values (only weights, no biases) a_ : Dict = state_dict.pop(F"""encoder.deit.blocks.{i}.attn.qkv.weight""" ) a_ : str = in_proj_weight[ : encoder_config.hidden_size, : ] a_ : Any = in_proj_weight[ encoder_config.hidden_size : encoder_config.hidden_size * 2, : ] a_ : List[str] = in_proj_weight[ -encoder_config.hidden_size :, : ] def SCREAMING_SNAKE_CASE_ ( __A : Any , __A : Optional[Any] , __A : Optional[Any] ) -> Any: """simple docstring""" a_ : Optional[int] = dct.pop(__A ) a_ : List[str] = val def SCREAMING_SNAKE_CASE_ ( __A : Any ) -> List[str]: """simple docstring""" if "handwritten" in checkpoint_url: a_ : Dict = 'https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg' # industry # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" # # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg" 
elif "printed" in checkpoint_url or "stage1" in checkpoint_url: a_ : int = 'https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg' a_ : List[Any] = Image.open(requests.get(__A , stream=__A ).raw ).convert('RGB' ) return im @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( __A : int , __A : Dict ) -> List[Any]: """simple docstring""" a_ : Tuple = ViTConfig(image_size=3_84 , qkv_bias=__A ) a_ : Union[str, Any] = TrOCRConfig() # size of the architecture if "base" in checkpoint_url: a_ : List[Any] = 7_68 elif "large" in checkpoint_url: # use ViT-large encoder a_ : Optional[int] = 10_24 a_ : Optional[Any] = 40_96 a_ : Tuple = 24 a_ : Optional[Any] = 16 a_ : str = 10_24 else: raise ValueError('Should either find \'base\' or \'large\' in checkpoint URL' ) # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards if "large-printed" in checkpoint_url or "stage1" in checkpoint_url: a_ : Optional[int] = False a_ : int = 'relu' a_ : Tuple = 10_24 a_ : List[Any] = True a_ : List[str] = False a_ : str = False # load HuggingFace model a_ : List[str] = ViTModel(__A , add_pooling_layer=__A ) a_ : Dict = TrOCRForCausalLM(__A ) a_ : List[str] = VisionEncoderDecoderModel(encoder=__A , decoder=__A ) model.eval() # load state_dict of original model, rename some keys a_ : Optional[int] = torch.hub.load_state_dict_from_url(__A , map_location='cpu' , check_hash=__A )['model'] a_ : int = create_rename_keys(__A , __A ) for src, dest in rename_keys: rename_key(__A , __A , __A ) read_in_q_k_v(__A , __A ) # remove parameters we don't need del state_dict["encoder.deit.head.weight"] del state_dict["encoder.deit.head.bias"] del state_dict["decoder.version"] # add prefix to decoder keys for key, val in state_dict.copy().items(): a_ : int = state_dict.pop(__A ) if key.startswith('decoder' ) and "output_projection" not in key: a_ : Optional[int] = val else: a_ : Dict = val # load state dict model.load_state_dict(__A ) # Check outputs on an image a_ : Optional[int] = ViTImageProcessor(size=encoder_config.image_size ) a_ : Optional[int] = RobertaTokenizer.from_pretrained('roberta-large' ) a_ : Union[str, Any] = TrOCRProcessor(__A , __A ) a_ : Any = processor(images=prepare_img(__A ) , return_tensors='pt' ).pixel_values # verify logits a_ : Tuple = torch.tensor([[model.config.decoder.decoder_start_token_id]] ) a_ : Dict = model(pixel_values=__A , decoder_input_ids=__A ) a_ : List[str] = outputs.logits a_ : Any = torch.Size([1, 1, 5_02_65] ) if "trocr-base-handwritten" in checkpoint_url: a_ : Any = torch.tensor( [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] ) elif "trocr-large-handwritten" in checkpoint_url: a_ : Union[str, Any] = torch.tensor( [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] ) elif "trocr-base-printed" in checkpoint_url: a_ : Union[str, Any] = torch.tensor( [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] ) elif "trocr-large-printed" in checkpoint_url: a_ : Tuple = torch.tensor( [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] ) if "stage1" not in checkpoint_url: assert logits.shape == expected_shape, "Shape of logits not as expected" assert torch.allclose(logits[0, 0, :10] , __A , atol=1e-3 ), "First elements of logits not as expected" Path(__A ).mkdir(exist_ok=__A ) print(F"""Saving model to 
{pytorch_dump_folder_path}""" ) model.save_pretrained(__A ) print(F"""Saving processor to {pytorch_dump_folder_path}""" ) processor.save_pretrained(__A ) if __name__ == "__main__": UpperCAmelCase_ : List[str] = argparse.ArgumentParser() parser.add_argument( '--checkpoint_url', default='https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt', type=str, help='URL to the original PyTorch checkpoint (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) UpperCAmelCase_ : Union[str, Any] = parser.parse_args() convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
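# --- Example invocation (illustration only; the script filename is hypothetical) ---
#   python convert_trocr_checkpoint.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten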
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: 'Sunday',
    1: 'Monday',
    2: 'Tuesday',
    3: 'Wednesday',
    4: 'Thursday',
    5: 'Friday',
    6: 'Saturday',
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Return the week-day name of a given date, using the Doomsday algorithm."""
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) == 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
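# --- Usage sketch (illustration only; not part of the original file) ---
#   get_week_day(2020, 10, 24)  -> 'Saturday'
# (October's doomsday date 10/10 fell on a Saturday in 2020, and 10/24 is exactly two weeks later.)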
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version UpperCAmelCase_ : Dict = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt') UpperCAmelCase_ : str = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) UpperCAmelCase_ : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class SCREAMING_SNAKE_CASE__ : snake_case__ : Optional[str] = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) snake_case__ : Optional[str] = field( default=lowercase__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) snake_case__ : Optional[str] = field( default=lowercase__ , metadata={'''help''': '''The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'''} , ) snake_case__ : Optional[str] = field(default=lowercase__ , metadata={'''help''': '''A folder containing the training data.'''} ) snake_case__ : Optional[str] = field(default=lowercase__ , metadata={'''help''': '''A folder containing the validation data.'''} ) snake_case__ : Optional[float] = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) snake_case__ : int = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} ) snake_case__ : float = field( default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , ) snake_case__ : Optional[int] = field( default=lowercase__ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) snake_case__ : Optional[int] = field( default=lowercase__ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: a_ : List[Any] = {} if self.train_dir is not None: a_ : List[str] = self.train_dir if self.validation_dir is not None: a_ : List[Any] = self.validation_dir a_ : Any = data_files if data_files else None @dataclass class SCREAMING_SNAKE_CASE__ : snake_case__ : str = field( default=lowercase__ , metadata={ '''help''': ( '''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a ''' '''checkpoint identifier on the hub. 
''' '''Don\'t set if you want to train a model from scratch.''' ) } , ) snake_case__ : Optional[str] = field( default=lowercase__ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(lowercase__ )} , ) snake_case__ : Optional[str] = field( default=lowercase__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) snake_case__ : Optional[str] = field( default=lowercase__ , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) snake_case__ : Optional[str] = field( default=lowercase__ , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , ) snake_case__ : str = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) snake_case__ : str = field(default=lowercase__ , metadata={'''help''': '''Name or path of preprocessor config.'''} ) snake_case__ : bool = field( default=lowercase__ , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) snake_case__ : Optional[int] = field( default=lowercase__ , metadata={ '''help''': ( '''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.''' ) } , ) snake_case__ : Optional[int] = field( default=lowercase__ , metadata={ '''help''': ( '''The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.''' ) } , ) snake_case__ : Optional[int] = field( default=lowercase__ , metadata={'''help''': '''Stride to use for the encoder.'''} , ) class SCREAMING_SNAKE_CASE__ : def __init__( self : Any , SCREAMING_SNAKE_CASE__ : Tuple=1_9_2 , SCREAMING_SNAKE_CASE__ : Dict=3_2 , SCREAMING_SNAKE_CASE__ : Optional[int]=4 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.6 ) -> List[str]: a_ : int = input_size a_ : int = mask_patch_size a_ : str = model_patch_size a_ : Optional[int] = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError('Input size must be divisible by mask patch size' ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError('Mask patch size must be divisible by model patch size' ) a_ : Optional[Any] = self.input_size // self.mask_patch_size a_ : int = self.mask_patch_size // self.model_patch_size a_ : Dict = self.rand_size**2 a_ : int = int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self : Optional[int] ) -> Optional[int]: a_ : Any = np.random.permutation(self.token_count )[: self.mask_count] a_ : str = np.zeros(self.token_count , dtype=SCREAMING_SNAKE_CASE__ ) a_ : str = 1 a_ : Union[str, Any] = mask.reshape((self.rand_size, self.rand_size) ) a_ : List[Any] = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 ) return torch.tensor(mask.flatten() ) def SCREAMING_SNAKE_CASE_ ( __A : str ) -> List[Any]: """simple docstring""" a_ : Optional[Any] = torch.stack([example['pixel_values'] for example in examples] ) a_ : Optional[int] = torch.stack([example['mask'] for example in examples] ) return {"pixel_values": pixel_values, "bool_masked_pos": mask} def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]: """simple docstring""" a_ : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if 
len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. a_ , a_ , a_ : Optional[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: a_ , a_ , a_ : Optional[Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_mim' , __A , __A ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() a_ : int = training_args.get_process_log_level() logger.setLevel(__A ) transformers.utils.logging.set_verbosity(__A ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(F"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. a_ : Optional[int] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: a_ : List[Any] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Initialize our dataset. a_ : Optional[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. a_ : Union[str, Any] = None if 'validation' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __A ) and data_args.train_val_split > 0.0: a_ : str = ds['train'].train_test_split(data_args.train_val_split ) a_ : Optional[Any] = split['train'] a_ : Union[str, Any] = split['test'] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
a_ : Union[str, Any] = { 'cache_dir': model_args.cache_dir, 'revision': model_args.model_revision, 'use_auth_token': True if model_args.use_auth_token else None, } if model_args.config_name_or_path: a_ : Tuple = AutoConfig.from_pretrained(model_args.config_name_or_path , **__A ) elif model_args.model_name_or_path: a_ : Dict = AutoConfig.from_pretrained(model_args.model_name_or_path , **__A ) else: a_ : Union[str, Any] = CONFIG_MAPPING[model_args.model_type]() logger.warning('You are instantiating a new config instance from scratch.' ) if model_args.config_overrides is not None: logger.info(F"""Overriding config: {model_args.config_overrides}""" ) config.update_from_string(model_args.config_overrides ) logger.info(F"""New config: {config}""" ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(__A , 'decoder_type' ): a_ : Tuple = 'simmim' # adapt config a_ : List[str] = model_args.image_size if model_args.image_size is not None else config.image_size a_ : int = model_args.patch_size if model_args.patch_size is not None else config.patch_size a_ : Dict = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { 'image_size': model_args.image_size, 'patch_size': model_args.patch_size, 'encoder_stride': model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: a_ : List[str] = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **__A ) elif model_args.model_name_or_path: a_ : Any = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **__A ) else: a_ : str = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } a_ : List[str] = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: a_ : Any = AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__A , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('Training new model from scratch' ) a_ : List[str] = AutoModelForMaskedImageModeling.from_config(__A ) if training_args.do_train: a_ : List[Any] = ds['train'].column_names else: a_ : List[str] = ds['validation'].column_names if data_args.image_column_name is not None: a_ : List[str] = data_args.image_column_name elif "image" in column_names: a_ : Optional[Any] = 'image' elif "img" in column_names: a_ : Tuple = 'img' else: a_ : Any = column_names[0] # transformations as done in original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py a_ : Optional[int] = Compose( [ Lambda(lambda __A : img.convert('RGB' ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) # create mask generator a_ : int = MaskGenerator( input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , ) def preprocess_images(__A : Optional[Any] ): a_ : Optional[Any] = [transforms(__A ) for image in examples[image_column_name]] a_ : List[str] = [mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise 
ValueError('--do_train requires a train dataset' ) if data_args.max_train_samples is not None: a_ : Any = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(__A ) if training_args.do_eval: if "validation" not in ds: raise ValueError('--do_eval requires a validation dataset' ) if data_args.max_eval_samples is not None: a_ : Optional[Any] = ( ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(__A ) # Initialize our trainer a_ : str = Trainer( model=__A , args=__A , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=__A , data_collator=__A , ) # Training if training_args.do_train: a_ : Any = None if training_args.resume_from_checkpoint is not None: a_ : List[str] = training_args.resume_from_checkpoint elif last_checkpoint is not None: a_ : Tuple = last_checkpoint a_ : Any = trainer.train(resume_from_checkpoint=__A ) trainer.save_model() trainer.log_metrics('train' , train_result.metrics ) trainer.save_metrics('train' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: a_ : List[str] = trainer.evaluate() trainer.log_metrics('eval' , __A ) trainer.save_metrics('eval' , __A ) # Write model card and (optionally) push to hub a_ : Optional[Any] = { 'finetuned_from': model_args.model_name_or_path, 'tasks': 'masked-image-modeling', 'dataset': data_args.dataset_name, 'tags': ['masked-image-modeling'], } if training_args.push_to_hub: trainer.push_to_hub(**__A ) else: trainer.create_model_card(**__A ) if __name__ == "__main__": main()
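# --- Example invocation (illustration only; flags are the ones parsed by HfArgumentParser above) ---
#   python run_mim.py \
#       --dataset_name cifar10 \
#       --output_dir ./simmim-outputs \
#       --do_train \
#       --do_eval \
#       --overwrite_output_dir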
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Build sinusoidal timestep embeddings of shape (len(timesteps), embedding_dim)."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name='linear_1')(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name='linear_2')(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
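# --- Usage sketch (illustration only; not part of the original file) ---
#   emb = get_sinusoidal_embeddings(jnp.arange(4), embedding_dim=8)
#   emb.shape  -> (4, 8)   # per timestep: 4 sine features followed by 4 cosine features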
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) UpperCAmelCase_ : Dict = { 'configuration_speecht5': [ 'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP', 'SpeechT5Config', 'SpeechT5HifiGanConfig', ], 'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'], 'processing_speecht5': ['SpeechT5Processor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Tuple = ['SpeechT5Tokenizer'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : List[Any] = [ 'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST', 'SpeechT5ForSpeechToText', 'SpeechT5ForSpeechToSpeech', 'SpeechT5ForTextToSpeech', 'SpeechT5Model', 'SpeechT5PreTrainedModel', 'SpeechT5HifiGan', ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys UpperCAmelCase_ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__) UpperCAmelCase_ : List[str] = OrderedDict( [ # Base model mapping ('albert', 'FlaxAlbertModel'), ('bart', 'FlaxBartModel'), ('beit', 'FlaxBeitModel'), ('bert', 'FlaxBertModel'), ('big_bird', 'FlaxBigBirdModel'), ('blenderbot', 'FlaxBlenderbotModel'), ('blenderbot-small', 'FlaxBlenderbotSmallModel'), ('clip', 'FlaxCLIPModel'), ('distilbert', 'FlaxDistilBertModel'), ('electra', 'FlaxElectraModel'), ('gpt-sw3', 'FlaxGPT2Model'), ('gpt2', 'FlaxGPT2Model'), ('gpt_neo', 'FlaxGPTNeoModel'), ('gptj', 'FlaxGPTJModel'), ('longt5', 'FlaxLongT5Model'), ('marian', 'FlaxMarianModel'), ('mbart', 'FlaxMBartModel'), ('mt5', 'FlaxMT5Model'), ('opt', 'FlaxOPTModel'), ('pegasus', 'FlaxPegasusModel'), ('regnet', 'FlaxRegNetModel'), ('resnet', 'FlaxResNetModel'), ('roberta', 'FlaxRobertaModel'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'), ('roformer', 'FlaxRoFormerModel'), ('t5', 'FlaxT5Model'), ('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'), ('vit', 'FlaxViTModel'), ('wav2vec2', 'FlaxWav2Vec2Model'), ('whisper', 'FlaxWhisperModel'), ('xglm', 'FlaxXGLMModel'), ('xlm-roberta', 'FlaxXLMRobertaModel'), ] ) UpperCAmelCase_ : str = OrderedDict( [ # Model for pre-training mapping ('albert', 'FlaxAlbertForPreTraining'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForPreTraining'), ('big_bird', 'FlaxBigBirdForPreTraining'), ('electra', 'FlaxElectraForPreTraining'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('t5', 'FlaxT5ForConditionalGeneration'), ('wav2vec2', 'FlaxWav2Vec2ForPreTraining'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) UpperCAmelCase_ : Dict = OrderedDict( [ # Model for Masked LM mapping ('albert', 'FlaxAlbertForMaskedLM'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForMaskedLM'), ('big_bird', 'FlaxBigBirdForMaskedLM'), ('distilbert', 'FlaxDistilBertForMaskedLM'), ('electra', 'FlaxElectraForMaskedLM'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) UpperCAmelCase_ : Optional[Any] = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ('bart', 'FlaxBartForConditionalGeneration'), ('blenderbot', 'FlaxBlenderbotForConditionalGeneration'), ('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'), ('encoder-decoder', 'FlaxEncoderDecoderModel'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('marian', 'FlaxMarianMTModel'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('pegasus', 'FlaxPegasusForConditionalGeneration'), ('t5', 'FlaxT5ForConditionalGeneration'), ] ) UpperCAmelCase_ : List[str] = OrderedDict( [ # Model for Image-classsification ('beit', 'FlaxBeitForImageClassification'), ('regnet', 'FlaxRegNetForImageClassification'), ('resnet', 'FlaxResNetForImageClassification'), ('vit', 'FlaxViTForImageClassification'), ] ) 
UpperCAmelCase_ : int = OrderedDict( [ ('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'), ] ) UpperCAmelCase_ : List[str] = OrderedDict( [ # Model for Causal LM mapping ('bart', 'FlaxBartForCausalLM'), ('bert', 'FlaxBertForCausalLM'), ('big_bird', 'FlaxBigBirdForCausalLM'), ('electra', 'FlaxElectraForCausalLM'), ('gpt-sw3', 'FlaxGPT2LMHeadModel'), ('gpt2', 'FlaxGPT2LMHeadModel'), ('gpt_neo', 'FlaxGPTNeoForCausalLM'), ('gptj', 'FlaxGPTJForCausalLM'), ('opt', 'FlaxOPTForCausalLM'), ('roberta', 'FlaxRobertaForCausalLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'), ('xglm', 'FlaxXGLMForCausalLM'), ('xlm-roberta', 'FlaxXLMRobertaForCausalLM'), ] ) UpperCAmelCase_ : List[str] = OrderedDict( [ # Model for Sequence Classification mapping ('albert', 'FlaxAlbertForSequenceClassification'), ('bart', 'FlaxBartForSequenceClassification'), ('bert', 'FlaxBertForSequenceClassification'), ('big_bird', 'FlaxBigBirdForSequenceClassification'), ('distilbert', 'FlaxDistilBertForSequenceClassification'), ('electra', 'FlaxElectraForSequenceClassification'), ('mbart', 'FlaxMBartForSequenceClassification'), ('roberta', 'FlaxRobertaForSequenceClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'), ('roformer', 'FlaxRoFormerForSequenceClassification'), ('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'), ] ) UpperCAmelCase_ : List[str] = OrderedDict( [ # Model for Question Answering mapping ('albert', 'FlaxAlbertForQuestionAnswering'), ('bart', 'FlaxBartForQuestionAnswering'), ('bert', 'FlaxBertForQuestionAnswering'), ('big_bird', 'FlaxBigBirdForQuestionAnswering'), ('distilbert', 'FlaxDistilBertForQuestionAnswering'), ('electra', 'FlaxElectraForQuestionAnswering'), ('mbart', 'FlaxMBartForQuestionAnswering'), ('roberta', 'FlaxRobertaForQuestionAnswering'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'), ('roformer', 'FlaxRoFormerForQuestionAnswering'), ('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'), ] ) UpperCAmelCase_ : Union[str, Any] = OrderedDict( [ # Model for Token Classification mapping ('albert', 'FlaxAlbertForTokenClassification'), ('bert', 'FlaxBertForTokenClassification'), ('big_bird', 'FlaxBigBirdForTokenClassification'), ('distilbert', 'FlaxDistilBertForTokenClassification'), ('electra', 'FlaxElectraForTokenClassification'), ('roberta', 'FlaxRobertaForTokenClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'), ('roformer', 'FlaxRoFormerForTokenClassification'), ('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'), ] ) UpperCAmelCase_ : Dict = OrderedDict( [ # Model for Multiple Choice mapping ('albert', 'FlaxAlbertForMultipleChoice'), ('bert', 'FlaxBertForMultipleChoice'), ('big_bird', 'FlaxBigBirdForMultipleChoice'), ('distilbert', 'FlaxDistilBertForMultipleChoice'), ('electra', 'FlaxElectraForMultipleChoice'), ('roberta', 'FlaxRobertaForMultipleChoice'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'), ('roformer', 'FlaxRoFormerForMultipleChoice'), ('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'), ] ) UpperCAmelCase_ : List[str] = OrderedDict( [ ('bert', 'FlaxBertForNextSentencePrediction'), ] ) UpperCAmelCase_ : Dict = OrderedDict( [ ('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ] ) UpperCAmelCase_ : Union[str, Any] = OrderedDict( [ ('whisper', 'FlaxWhisperForAudioClassification'), ] ) UpperCAmelCase_ : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, 
FLAX_MODEL_MAPPING_NAMES) UpperCAmelCase_ : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) UpperCAmelCase_ : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) UpperCAmelCase_ : List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) UpperCAmelCase_ : int = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) UpperCAmelCase_ : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) UpperCAmelCase_ : Dict = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ : Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) UpperCAmelCase_ : List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ : int = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) UpperCAmelCase_ : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) UpperCAmelCase_ : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) UpperCAmelCase_ : Optional[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : List[Any] = FLAX_MODEL_MAPPING UpperCAmelCase_ : Tuple = auto_class_update(FlaxAutoModel) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Any = FLAX_MODEL_FOR_PRETRAINING_MAPPING UpperCAmelCase_ : Optional[Any] = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : List[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING UpperCAmelCase_ : Optional[Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Optional[Any] = FLAX_MODEL_FOR_MASKED_LM_MAPPING UpperCAmelCase_ : Union[str, Any] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Tuple = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING UpperCAmelCase_ : Optional[int] = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base' ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Tuple = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING UpperCAmelCase_ : Optional[Any] = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc='sequence classification' ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Tuple = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING UpperCAmelCase_ : str = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : List[str] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING UpperCAmelCase_ : Tuple = auto_class_update( FlaxAutoModelForTokenClassification, head_doc='token classification' ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Dict = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING UpperCAmelCase_ : int = 
auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Optional[int] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING UpperCAmelCase_ : Dict = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction' ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING UpperCAmelCase_ : str = auto_class_update( FlaxAutoModelForImageClassification, head_doc='image classification' ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Optional[Any] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase_ : Tuple = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Optional[int] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING UpperCAmelCase_ : Union[str, Any] = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling' )
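The mappings above back the FlaxAutoModel* factory classes. A short usage sketch with the public transformers names (the renamed classes in this listing are not importable as written); the checkpoint names are only examples.

from transformers import FlaxAutoModel, FlaxAutoModelForSequenceClassification

model = FlaxAutoModel.from_pretrained('bert-base-cased')  # resolves to FlaxBertModel via the base-model mapping
classifier = FlaxAutoModelForSequenceClassification.from_pretrained(
    'roberta-base'                                        # resolves to FlaxRobertaForSequenceClassification
)
# pass from_pt=True to from_pretrained if a checkpoint only ships PyTorch weights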
from abc import ABC, abstractmethod from argparse import ArgumentParser class SCREAMING_SNAKE_CASE__ ( lowercase__ ): @staticmethod @abstractmethod def SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ : ArgumentParser ) -> Tuple: raise NotImplementedError() @abstractmethod def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]: raise NotImplementedError()
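The two abstract hooks above correspond to transformers' CLI command interface (upstream they are named register_subcommand and run). A self-contained sketch of the same pattern, using illustrative names that are not part of this listing:

from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseCommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(subparsers):  # receives the action returned by add_subparsers()
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()


class HelloCommand(BaseCommand):
    @staticmethod
    def register_subcommand(subparsers):
        sub = subparsers.add_parser('hello')
        sub.add_argument('--name', default='world')
        sub.set_defaults(func=lambda args: HelloCommand(args.name).run())

    def __init__(self, name):
        self.name = name

    def run(self):
        print(f'hello {self.name}')


if __name__ == '__main__':
    parser = ArgumentParser('demo-cli')
    HelloCommand.register_subcommand(parser.add_subparsers())
    args = parser.parse_args()
    if hasattr(args, 'func'):
        args.func(args)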
import json import os import unittest from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import ( VOCAB_FILES_NAMES, GPTSanJapaneseTokenizer, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class SCREAMING_SNAKE_CASE__ ( lowercase__ , unittest.TestCase ): snake_case__ : Any = GPTSanJapaneseTokenizer snake_case__ : Tuple = False snake_case__ : str = {'''do_clean_text''': False, '''add_prefix_space''': False} def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str: super().setUp() # fmt: off a_ : Union[str, Any] = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>'] # fmt: on a_ : int = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀 a_ : List[Any] = {'unk_token': '<unk>'} a_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) a_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) with open(self.emoji_file , 'w' ) as emoji_writer: emoji_writer.write(json.dumps(SCREAMING_SNAKE_CASE__ ) ) def SCREAMING_SNAKE_CASE ( self : List[str] , **SCREAMING_SNAKE_CASE__ : Tuple ) -> int: kwargs.update(self.special_tokens_map ) return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> int: a_ : Optional[int] = 'こんにちは、世界。 \nこんばんは、㔺界。😀' a_ : List[str] = 'こんにちは、世界。 \nこんばんは、世界。😀' return input_text, output_text def SCREAMING_SNAKE_CASE ( self : str , SCREAMING_SNAKE_CASE__ : int ) -> Dict: a_ , a_ : Union[str, Any] = self.get_input_output_texts(SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) a_ : Dict = tokenizer.decode(SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ ) return text, ids def SCREAMING_SNAKE_CASE ( self : Dict ) -> int: pass # TODO add if relevant def SCREAMING_SNAKE_CASE ( self : Dict ) -> Any: pass # TODO add if relevant def SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple: pass # TODO add if relevant def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]: a_ : List[str] = self.get_tokenizer() # Testing tokenization a_ : List[Any] = 'こんにちは、世界。 こんばんは、㔺界。' a_ : Optional[int] = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。'] a_ : Dict = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Testing conversion to ids without special tokens a_ : Tuple = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] a_ : List[Any] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Testing conversion to ids with special tokens a_ : int = tokens + [tokenizer.unk_token] a_ : int = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 1_9] a_ : Tuple = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict: a_ : Union[str, Any] = self.get_tokenizer() # Testing tokenization a_ : Dict = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。' a_ : List[Any] = 
'こんにちは、、、、世界。こんばんは、、、、世界。' a_ : Any = tokenizer.encode(SCREAMING_SNAKE_CASE__ ) a_ : Optional[Any] = tokenizer.decode(SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @slow def SCREAMING_SNAKE_CASE ( self : str ) -> Dict: a_ : Tuple = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) # Testing tokenization a_ : List[Any] = 'こんにちは、世界。' a_ : int = 'こんばんは、㔺界。😀' a_ : Dict = 'こんにちは、世界。こんばんは、世界。😀' a_ : Optional[int] = tokenizer.encode(prefix_text + input_text ) a_ : Any = tokenizer.encode('' , prefix_text=prefix_text + input_text ) a_ : Union[str, Any] = tokenizer.encode(SCREAMING_SNAKE_CASE__ , prefix_text=SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = tokenizer.decode(SCREAMING_SNAKE_CASE__ ) a_ : Tuple = tokenizer.decode(SCREAMING_SNAKE_CASE__ ) a_ : str = tokenizer.decode(SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @slow def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: a_ : Tuple = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) # Testing tokenization a_ : str = 'こんにちは、世界。' a_ : List[str] = 'こんばんは、㔺界。😀' a_ : str = len(tokenizer.encode(SCREAMING_SNAKE_CASE__ ) ) - 2 a_ : Tuple = len(tokenizer.encode(SCREAMING_SNAKE_CASE__ ) ) - 2 a_ : Optional[Any] = [1] + [0] * (len_prefix + len_text + 1) a_ : Optional[Any] = [1] * (len_prefix + len_text + 1) + [0] a_ : Tuple = [1] + [1] * (len_prefix) + [0] * (len_text + 1) a_ : List[str] = tokenizer(prefix_text + input_text ).token_type_ids a_ : Union[str, Any] = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids a_ : Any = tokenizer(SCREAMING_SNAKE_CASE__ , prefix_text=SCREAMING_SNAKE_CASE__ ).token_type_ids self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @slow def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: a_ : str = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) a_ : Optional[int] = tokenizer.encode('あンいワ' ) a_ : Dict = tokenizer.encode('' , prefix_text='あンいワ' ) a_ : Dict = tokenizer.encode('いワ' , prefix_text='あン' ) self.assertEqual(tokenizer.decode(SCREAMING_SNAKE_CASE__ ) , tokenizer.decode(SCREAMING_SNAKE_CASE__ ) ) self.assertEqual(tokenizer.decode(SCREAMING_SNAKE_CASE__ ) , tokenizer.decode(SCREAMING_SNAKE_CASE__ ) ) self.assertNotEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertNotEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token @slow def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: a_ : List[str] = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) a_ : Optional[Any] = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']] a_ : List[str] = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ ) a_ : Dict = tokenizer.batch_encode_plus(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ ) # fmt: off a_ : List[Any] = [[3_5_9_9_3, 8_6_4_0, 2_5_9_4_8, 3_5_9_9_8, 3_0_6_4_7, 3_5_6_7_5, 3_5_9_9_9, 3_5_9_9_9], [3_5_9_9_3, 1_0_3_8_2, 9_8_6_8, 3_5_9_9_8, 3_0_6_4_6, 9_4_5_9, 3_0_6_4_6, 3_5_6_7_5]] a_ : Any = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]] a_ : List[Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 
1]] # fmt: on self.assertListEqual(x_token.input_ids , SCREAMING_SNAKE_CASE__ ) self.assertListEqual(x_token.token_type_ids , SCREAMING_SNAKE_CASE__ ) self.assertListEqual(x_token.attention_mask , SCREAMING_SNAKE_CASE__ ) self.assertListEqual(x_token_a.input_ids , SCREAMING_SNAKE_CASE__ ) self.assertListEqual(x_token_a.token_type_ids , SCREAMING_SNAKE_CASE__ ) self.assertListEqual(x_token_a.attention_mask , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: # Intentionally convert some words to accommodate character fluctuations unique to Japanese pass def SCREAMING_SNAKE_CASE ( self : Dict ) -> int: # tokenizer has no padding token pass
import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class SCREAMING_SNAKE_CASE__ : def __init__( self : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str=1_3 , SCREAMING_SNAKE_CASE__ : Optional[Any]=3_0 , SCREAMING_SNAKE_CASE__ : int=2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : str=3_2 , SCREAMING_SNAKE_CASE__ : str=5 , SCREAMING_SNAKE_CASE__ : Optional[Any]=4 , SCREAMING_SNAKE_CASE__ : List[str]=3_7 , SCREAMING_SNAKE_CASE__ : Tuple="gelu" , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=1_0 , SCREAMING_SNAKE_CASE__ : List[str]=0.02 , SCREAMING_SNAKE_CASE__ : int=3 , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : Any=2 , ) -> str: a_ : Union[str, Any] = parent a_ : str = batch_size a_ : List[Any] = image_size a_ : int = patch_size a_ : Optional[int] = num_channels a_ : int = is_training a_ : List[Any] = use_labels a_ : Any = hidden_size a_ : Optional[Any] = num_hidden_layers a_ : List[Any] = num_attention_heads a_ : Optional[Any] = intermediate_size a_ : Dict = hidden_act a_ : Tuple = hidden_dropout_prob a_ : Optional[int] = attention_probs_dropout_prob a_ : Union[str, Any] = type_sequence_label_size a_ : List[Any] = initializer_range a_ : Union[str, Any] = scope a_ : Union[str, Any] = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) a_ : int = (image_size // patch_size) ** 2 a_ : List[str] = num_patches + 2 def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: a_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a_ : Tuple = None if self.use_labels: a_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a_ : Union[str, Any] = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def SCREAMING_SNAKE_CASE ( self : int 
, SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> str: a_ : Dict = DeiTModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : List[str] = model(SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any ) -> Optional[int]: a_ : str = DeiTForMaskedImageModeling(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : Any = model(SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images a_ : List[str] = 1 a_ : List[Any] = DeiTForMaskedImageModeling(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) a_ : List[Any] = model(SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int ) -> Optional[Any]: a_ : Tuple = self.type_sequence_label_size a_ : Dict = DeiTForImageClassification(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : Optional[Any] = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images a_ : int = 1 a_ : Optional[int] = DeiTForImageClassification(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) a_ : str = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: a_ : Optional[Any] = self.prepare_config_and_inputs() ( ( a_ ) , ( a_ ) , ( a_ ) , ) : Optional[int] = config_and_inputs a_ : Tuple = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( lowercase__ , lowercase__ , unittest.TestCase ): snake_case__ : Dict = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) snake_case__ : Dict = ( { '''feature-extraction''': DeiTModel, '''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) snake_case__ : Union[str, Any] = False snake_case__ : Union[str, Any] = False snake_case__ : Dict = False def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]: a_ : List[str] = DeiTModelTester(self ) a_ : List[str] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ , hidden_size=3_7 ) def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: self.config_tester.run_common_tests() @unittest.skip(reason='DeiT does not use inputs_embeds' ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]: pass def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]: a_ , a_ : Tuple = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ : Optional[Any] = model_class(SCREAMING_SNAKE_CASE__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) a_ : List[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE__ , nn.Linear ) ) def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]: a_ , a_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ : Any = model_class(SCREAMING_SNAKE_CASE__ ) a_ : Optional[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a_ : List[Any] = [*signature.parameters.keys()] a_ : Tuple = ['pixel_values'] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]: a_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict: a_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]: a_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str]=False ) -> Union[str, Any]: a_ : List[Any] = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: if not self.model_tester.is_training: return a_ , a_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() a_ : Optional[int] = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(SCREAMING_SNAKE_CASE__ ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue a_ : Tuple = model_class(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.train() a_ : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = model(**SCREAMING_SNAKE_CASE__ ).loss loss.backward() def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: a_ , a_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return a_ : Optional[Any] = False a_ : Any = True for model_class in self.all_model_classes: if model_class in get_values(SCREAMING_SNAKE_CASE__ ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue a_ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE__ ) model.gradient_checkpointing_enable() model.to(SCREAMING_SNAKE_CASE__ ) model.train() a_ : int = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ ) a_ : Dict = model(**SCREAMING_SNAKE_CASE__ ).loss loss.backward() def SCREAMING_SNAKE_CASE ( self : Optional[int] ) 
-> Optional[Any]: a_ , a_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() a_ : List[Any] = [ {'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float}, {'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long}, {'title': 'regression', 'num_labels': 1, 'dtype': torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(SCREAMING_SNAKE_CASE__ ), *get_values(SCREAMING_SNAKE_CASE__ ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ): a_ : Optional[int] = problem_type['title'] a_ : Union[str, Any] = problem_type['num_labels'] a_ : Optional[int] = model_class(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.train() a_ : Dict = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ ) if problem_type["num_labels"] > 1: a_ : List[Any] = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] ) a_ : Tuple = inputs['labels'].to(problem_type['dtype'] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=SCREAMING_SNAKE_CASE__ ) as warning_list: a_ : Optional[Any] = model(**SCREAMING_SNAKE_CASE__ ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F"""Something is going wrong in the regression problem: intercepted {w.message}""" ) loss.backward() @slow def SCREAMING_SNAKE_CASE ( self : Any ) -> Any: for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a_ : Dict = DeiTModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE_ ( ) -> Optional[Any]: """simple docstring""" a_ : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE ( self : Any ) -> Any: return ( DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE ( self : Tuple ) -> int: a_ : Union[str, Any] = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ).to( SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = self.default_image_processor a_ : List[Any] = prepare_img() a_ : Any = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).to(SCREAMING_SNAKE_CASE__ ) # forward pass with torch.no_grad(): a_ : Any = model(**SCREAMING_SNAKE_CASE__ ) # verify the logits a_ : Tuple = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ ) a_ : str = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(SCREAMING_SNAKE_CASE__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) ) @slow @require_accelerate @require_torch_gpu def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]: a_ : str = DeiTModel.from_pretrained( 'facebook/deit-base-distilled-patch16-224' , 
torch_dtype=torch.floataa , device_map='auto' ) a_ : List[Any] = self.default_image_processor a_ : int = prepare_img() a_ : Dict = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='pt' ) a_ : List[str] = inputs.pixel_values.to(SCREAMING_SNAKE_CASE__ ) # forward pass to make sure inference works in fp16 with torch.no_grad(): a_ : Tuple = model(SCREAMING_SNAKE_CASE__ )
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Union[str, Any] = ['''pixel_values'''] def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, int]] = None , SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Union[int, float] = 1 / 2_5_5 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , **SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> None: super().__init__(**SCREAMING_SNAKE_CASE__ ) a_ : str = size if size is not None else {'shortest_edge': 2_5_6} a_ : Any = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ ) a_ : Dict = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4} a_ : Optional[int] = get_size_dict(SCREAMING_SNAKE_CASE__ ) a_ : List[str] = do_resize a_ : Dict = size a_ : Optional[Any] = resample a_ : Optional[int] = do_center_crop a_ : Dict = crop_size a_ : int = do_rescale a_ : int = rescale_factor a_ : Tuple = do_normalize a_ : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN a_ : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Dict[str, int] , SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> np.ndarray: a_ : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. 
Got {size.keys()}""" ) a_ : Tuple = get_resize_output_image_size(SCREAMING_SNAKE_CASE__ , size=size['shortest_edge'] , default_to_square=SCREAMING_SNAKE_CASE__ ) return resize(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Dict[str, int] , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> np.ndarray: a_ : str = get_size_dict(SCREAMING_SNAKE_CASE__ ) return center_crop(SCREAMING_SNAKE_CASE__ , size=(size['height'], size['width']) , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> np.ndarray: return rescale(SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Union[float, List[float]] , SCREAMING_SNAKE_CASE__ : Union[float, List[float]] , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : List[str] , ) -> np.ndarray: return normalize(SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : ImageInput , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : PILImageResampling = None , SCREAMING_SNAKE_CASE__ : bool = None , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[float] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> Union[str, Any]: a_ : List[str] = do_resize if do_resize is not None else self.do_resize a_ : Dict = size if size is not None else self.size a_ : Dict = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = resample if resample is not None else self.resample a_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop a_ : int = crop_size if crop_size is not None else self.crop_size a_ : Optional[int] = get_size_dict(SCREAMING_SNAKE_CASE__ ) a_ : Dict = do_rescale if do_rescale is not None else self.do_rescale a_ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor a_ : Any = do_normalize if do_normalize is not None else self.do_normalize a_ : str = image_mean if image_mean is not None else self.image_mean a_ : Dict = image_std if image_std is not None else self.image_std a_ : Optional[int] = make_list_of_images(SCREAMING_SNAKE_CASE__ ) if not valid_images(SCREAMING_SNAKE_CASE__ ): raise ValueError( 'Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. a_ : Any = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for image in images] if do_resize: a_ : str = [self.resize(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ ) for image in images] if do_center_crop: a_ : int = [self.center_crop(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ ) for image in images] if do_rescale: a_ : Optional[Any] = [self.rescale(image=SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ ) for image in images] if do_normalize: a_ : List[Any] = [self.normalize(image=SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ ) for image in images] a_ : Dict = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images] a_ : Tuple = {'pixel_values': images} return BatchFeature(data=SCREAMING_SNAKE_CASE__ , tensor_type=SCREAMING_SNAKE_CASE__ )
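A hedged usage sketch for the image processor above. SimpleImageProcessor is an assumed alias for the renamed class in this listing; with the defaults, preprocessing is resize to a shortest edge of 256, center crop to 224x224, rescale by 1/255, ImageNet-standard normalization, and channels-first output.

import numpy as np
from PIL import Image

processor = SimpleImageProcessor()                     # assumed alias for the class defined above
image = Image.fromarray((np.random.rand(480, 640, 3) * 255).astype(np.uint8))
batch = processor(images=image, return_tensors='np')  # BaseImageProcessor.__call__ dispatches to preprocess
print(batch['pixel_values'].shape)                     # (1, 3, 224, 224)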
import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset UpperCAmelCase_ : Tuple = 'bert-base-cased' UpperCAmelCase_ : str = 'google/pegasus-xsum' UpperCAmelCase_ : int = [' Sam ate lunch today.', 'Sams lunch ingredients.'] UpperCAmelCase_ : Optional[Any] = ['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee'] UpperCAmelCase_ : str = 'patrickvonplaten/t5-tiny-random' UpperCAmelCase_ : Optional[int] = 'sshleifer/bart-tiny-random' UpperCAmelCase_ : Optional[Any] = 'sshleifer/tiny-mbart' UpperCAmelCase_ : Optional[int] = 'sshleifer/tiny-marian-en-de' def SCREAMING_SNAKE_CASE_ ( __A : Path , __A : list ) -> Tuple: """simple docstring""" a_ : List[str] = '\n'.join(__A ) Path(__A ).open('w' ).writelines(__A ) def SCREAMING_SNAKE_CASE_ ( __A : Dict ) -> List[Any]: """simple docstring""" for split in ["train", "val", "test"]: _dump_articles(os.path.join(__A , F"""{split}.source""" ) , __A ) _dump_articles(os.path.join(__A , F"""{split}.target""" ) , __A ) return tmp_dir class SCREAMING_SNAKE_CASE__ ( lowercase__ ): @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[Any]: a_ : Any = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) a_ : Any = max(len(tokenizer.encode(SCREAMING_SNAKE_CASE__ ) ) for a in ARTICLES ) a_ : Tuple = max(len(tokenizer.encode(SCREAMING_SNAKE_CASE__ ) ) for a in SUMMARIES ) a_ : Optional[Any] = 4 a_ : str = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated a_ , a_ : str = 'ro_RO', 'de_DE' # ignored for all but mbart, but never causes error. a_ : str = SeqaSeqDataset( SCREAMING_SNAKE_CASE__ , data_dir=SCREAMING_SNAKE_CASE__ , type_path='train' , max_source_length=SCREAMING_SNAKE_CASE__ , max_target_length=SCREAMING_SNAKE_CASE__ , src_lang=SCREAMING_SNAKE_CASE__ , tgt_lang=SCREAMING_SNAKE_CASE__ , ) a_ : Tuple = DataLoader(SCREAMING_SNAKE_CASE__ , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. 
assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place a_ : Tuple = shift_tokens_right(batch['labels'] , tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> List[Any]: a_ : str = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ ) a_ : Any = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) a_ : Any = max(len(tokenizer.encode(SCREAMING_SNAKE_CASE__ ) ) for a in ARTICLES ) a_ : int = max(len(tokenizer.encode(SCREAMING_SNAKE_CASE__ ) ) for a in SUMMARIES ) a_ : List[Any] = 4 a_ : int = LegacySeqaSeqDataset( SCREAMING_SNAKE_CASE__ , data_dir=SCREAMING_SNAKE_CASE__ , type_path='train' , max_source_length=2_0 , max_target_length=SCREAMING_SNAKE_CASE__ , ) a_ : Any = DataLoader(SCREAMING_SNAKE_CASE__ , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 2_0 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def SCREAMING_SNAKE_CASE ( self : int ) -> Dict: a_ : int = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' ) a_ : Tuple = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) a_ : Union[str, Any] = tmp_dir.joinpath('train.source' ).open().readlines() a_ : int = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1_2_8 , SCREAMING_SNAKE_CASE__ ) a_ : List[str] = {x.name for x in tmp_dir.iterdir()} a_ : Optional[Any] = {x.name for x in save_dir.iterdir()} a_ : Optional[Any] = save_dir.joinpath('train.source' ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(SCREAMING_SNAKE_CASE__ ) < len(SCREAMING_SNAKE_CASE__ ) assert len(SCREAMING_SNAKE_CASE__ ) == 1 assert len(packed_examples[0] ) == sum(len(SCREAMING_SNAKE_CASE__ ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq' ) def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]: if not FAIRSEQ_AVAILABLE: return a_ , a_ , a_ : Optional[int] = self._get_dataset(max_len=6_4 ) a_ : Tuple = 6_4 a_ : Any = ds.make_dynamic_sampler(SCREAMING_SNAKE_CASE__ , required_batch_size_multiple=SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = [len(SCREAMING_SNAKE_CASE__ ) for x in batch_sampler] assert len(set(SCREAMING_SNAKE_CASE__ ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) # no dropped or added examples a_ : Dict = DataLoader(SCREAMING_SNAKE_CASE__ , batch_sampler=SCREAMING_SNAKE_CASE__ , 
collate_fn=ds.collate_fn , num_workers=2 ) a_ : Tuple = [] a_ : Optional[int] = [] for batch in data_loader: a_ : Any = batch['input_ids'].shape a_ : Any = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple a_ : List[Any] = np.product(batch['input_ids'].shape ) num_src_per_batch.append(SCREAMING_SNAKE_CASE__ ) if num_src_tokens > (max_tokens * 1.1): failures.append(SCREAMING_SNAKE_CASE__ ) assert num_src_per_batch[0] == max(SCREAMING_SNAKE_CASE__ ) if failures: raise AssertionError(F"""too many tokens in {len(SCREAMING_SNAKE_CASE__ )} batches""" ) def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: a_ , a_ , a_ : Union[str, Any] = self._get_dataset(max_len=5_1_2 ) a_ : Union[str, Any] = 2 a_ : Tuple = ds.make_sortish_sampler(SCREAMING_SNAKE_CASE__ , shuffle=SCREAMING_SNAKE_CASE__ ) a_ : Tuple = DataLoader(SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , collate_fn=ds.collate_fn , num_workers=2 ) a_ : Optional[Any] = DataLoader(SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , collate_fn=ds.collate_fn , num_workers=2 , sampler=SCREAMING_SNAKE_CASE__ ) a_ : Optional[Any] = tokenizer.pad_token_id def count_pad_tokens(SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any="input_ids" ): return [batch[k].eq(SCREAMING_SNAKE_CASE__ ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(SCREAMING_SNAKE_CASE__ , k='labels' ) ) < sum(count_pad_tokens(SCREAMING_SNAKE_CASE__ , k='labels' ) ) assert sum(count_pad_tokens(SCREAMING_SNAKE_CASE__ ) ) < sum(count_pad_tokens(SCREAMING_SNAKE_CASE__ ) ) assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any=1_0_0_0 , SCREAMING_SNAKE_CASE__ : Any=1_2_8 ) -> Dict: if os.getenv('USE_REAL_DATA' , SCREAMING_SNAKE_CASE__ ): a_ : Optional[Any] = 'examples/seq2seq/wmt_en_ro' a_ : Dict = max_len * 2 * 6_4 if not Path(SCREAMING_SNAKE_CASE__ ).joinpath('train.len' ).exists(): save_len_file(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else: a_ : Any = 'examples/seq2seq/test_data/wmt_en_ro' a_ : Tuple = max_len * 4 save_len_file(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) a_ : Any = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ ) a_ : Optional[Any] = SeqaSeqDataset( SCREAMING_SNAKE_CASE__ , data_dir=SCREAMING_SNAKE_CASE__ , type_path='train' , max_source_length=SCREAMING_SNAKE_CASE__ , max_target_length=SCREAMING_SNAKE_CASE__ , n_obs=SCREAMING_SNAKE_CASE__ , ) return ds, max_tokens, tokenizer def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: a_ , a_ , a_ : Optional[int] = self._get_dataset() a_ : Optional[Any] = set(DistributedSortishSampler(SCREAMING_SNAKE_CASE__ , 2_5_6 , num_replicas=2 , rank=0 , add_extra_examples=SCREAMING_SNAKE_CASE__ ) ) a_ : List[Any] = set(DistributedSortishSampler(SCREAMING_SNAKE_CASE__ , 2_5_6 , num_replicas=2 , rank=1 , add_extra_examples=SCREAMING_SNAKE_CASE__ ) ) assert idsa.intersection(SCREAMING_SNAKE_CASE__ ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : str ) -> str: a_ : str = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ ) if tok_name == MBART_TINY: a_ : Dict = SeqaSeqDataset( SCREAMING_SNAKE_CASE__ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , src_lang='EN' , tgt_lang='FR' , 
) a_ : Dict = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: a_ : Any = SeqaSeqDataset( SCREAMING_SNAKE_CASE__ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , ) a_ : List[Any] = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(SCREAMING_SNAKE_CASE__ ) == 1 if tok_name == BART_TINY else len(SCREAMING_SNAKE_CASE__ ) == 0
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedily build up `value` from the given denominations, largest first."""
    total_value = int(value)
    # Initialize result
    answer = []
    # Traverse all denominations, largest first
    for denomination in reversed(denominations):
        # Take as many of this denomination as still fit
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)
    # Return the "answer" array
    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"
    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())
        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if the user does not enter any
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()
    if int(value) <= 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
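A quick check of the greedy routine above. Greedy change-making is optimal for canonical systems such as the Indian-currency denominations hard-coded in the driver, but not for arbitrary denomination sets, as the second example shows.

assert find_minimum_change([1, 2, 5, 10], "27") == [10, 10, 5, 2]  # 27 = 10 + 10 + 5 + 2
assert find_minimum_change([1, 3, 4], "6") == [4, 1, 1]            # greedy uses 3 coins; the optimum [3, 3] needs only 2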
import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def SCREAMING_SNAKE_CASE_ ( __A : Optional[int] ) -> Tuple: """simple docstring""" if "img_encoder.pos_embed" in name: a_ : str = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' ) if "img_encoder.patch_embed.proj" in name: a_ : Tuple = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' ) if "img_encoder.patch_embed.norm" in name: a_ : Optional[Any] = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' ) if "img_encoder.layers" in name: a_ : Optional[int] = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' ) if "blocks" in name and "res" not in name: a_ : Union[str, Any] = name.replace('blocks' , 'layers' ) if "attn" in name and "pre_assign" not in name: a_ : int = name.replace('attn' , 'self_attn' ) if "proj" in name and "self_attn" in name and "text" not in name: a_ : Optional[Any] = name.replace('proj' , 'out_proj' ) if "pre_assign_attn.attn.proj" in name: a_ : str = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' ) if "norm1" in name: a_ : List[Any] = name.replace('norm1' , 'layer_norm1' ) if "norm2" in name and "pre_assign" not in name: a_ : str = name.replace('norm2' , 'layer_norm2' ) if "img_encoder.norm" in name: a_ : Optional[Any] = name.replace('img_encoder.norm' , 'vision_model.layernorm' ) # text encoder if "text_encoder.token_embedding" in name: a_ : List[Any] = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' ) if "text_encoder.positional_embedding" in name: a_ : Optional[Any] = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' ) if "text_encoder.transformer.resblocks." in name: a_ : List[Any] = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' ) if "ln_1" in name: a_ : Tuple = name.replace('ln_1' , 'layer_norm1' ) if "ln_2" in name: a_ : Any = name.replace('ln_2' , 'layer_norm2' ) if "c_fc" in name: a_ : Any = name.replace('c_fc' , 'fc1' ) if "c_proj" in name: a_ : Any = name.replace('c_proj' , 'fc2' ) if "text_encoder" in name: a_ : Optional[int] = name.replace('text_encoder' , 'text_model' ) if "ln_final" in name: a_ : int = name.replace('ln_final' , 'final_layer_norm' ) # projection layers if "img_projector.linear_hidden." in name: a_ : Union[str, Any] = name.replace('img_projector.linear_hidden.' , 'visual_projection.' ) if "img_projector.linear_out." in name: a_ : Tuple = name.replace('img_projector.linear_out.' , 'visual_projection.3.' ) if "text_projector.linear_hidden" in name: a_ : List[str] = name.replace('text_projector.linear_hidden' , 'text_projection' ) if "text_projector.linear_out" in name: a_ : Optional[Any] = name.replace('text_projector.linear_out' , 'text_projection.3' ) return name def SCREAMING_SNAKE_CASE_ ( __A : Union[str, Any] , __A : Union[str, Any] ) -> List[Any]: """simple docstring""" for key in orig_state_dict.copy().keys(): a_ : str = orig_state_dict.pop(__A ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors a_ : Optional[int] = key.split('.' 
) a_ , a_ : List[str] = int(key_split[2] ), int(key_split[4] ) a_ : str = config.vision_config.hidden_size if "weight" in key: a_ : Dict = val[:dim, :] a_ : List[Any] = val[dim : dim * 2, :] a_ : str = val[-dim:, :] else: a_ : List[str] = val[:dim] a_ : Union[str, Any] = val[dim : dim * 2] a_ : Any = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors a_ : Optional[int] = key.split('.' ) a_ : Optional[Any] = int(key_split[3] ) a_ : List[str] = config.text_config.hidden_size if "weight" in key: a_ : int = val[:dim, :] a_ : str = val[ dim : dim * 2, : ] a_ : List[Any] = val[-dim:, :] else: a_ : str = val[:dim] a_ : Union[str, Any] = val[dim : dim * 2] a_ : Dict = val[-dim:] else: a_ : List[Any] = rename_key(__A ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): a_ : Tuple = val.squeeze_() else: a_ : List[str] = val return orig_state_dict def SCREAMING_SNAKE_CASE_ ( ) -> List[Any]: """simple docstring""" a_ : List[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg' a_ : List[str] = Image.open(requests.get(__A , stream=__A ).raw ) return im @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( __A : Optional[int] , __A : str , __A : Union[str, Any]="groupvit-gcc-yfcc" , __A : Optional[Any]=False ) -> List[str]: """simple docstring""" a_ : Optional[int] = GroupViTConfig() a_ : Union[str, Any] = GroupViTModel(__A ).eval() a_ : List[str] = torch.load(__A , map_location='cpu' )['model'] a_ : Dict = convert_state_dict(__A , __A ) a_ , a_ : Tuple = model.load_state_dict(__A , strict=__A ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(__A ) == 0) # verify result a_ : List[Any] = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' ) a_ : Optional[int] = prepare_img() a_ : Tuple = processor(text=['a photo of a cat', 'a photo of a dog'] , images=__A , padding=__A , return_tensors='pt' ) with torch.no_grad(): a_ : List[str] = model(**__A ) if model_name == "groupvit-gcc-yfcc": a_ : Any = torch.tensor([[13.3523, 6.3629]] ) elif model_name == "groupvit-gcc-redcaps": a_ : Optional[int] = torch.tensor([[16.1873, 8.6230]] ) else: raise ValueError(F"""Model name {model_name} not supported.""" ) assert torch.allclose(outputs.logits_per_image , __A , atol=1e-3 ) processor.save_pretrained(__A ) model.save_pretrained(__A ) print('Successfully saved processor and model to' , __A ) if push_to_hub: print('Pushing to the hub...' ) processor.push_to_hub(__A , organization='nielsr' ) model.push_to_hub(__A , organization='nielsr' ) if __name__ == "__main__": UpperCAmelCase_ : Dict = argparse.ArgumentParser() parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.' ) parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint') parser.add_argument( '--model_name', default='groupvit-gccy-fcc', type=str, help='Name of the model. 
Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'', ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.', ) UpperCAmelCase_ : int = parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
import flax.linen as nn import jax import jax.numpy as jnp class SCREAMING_SNAKE_CASE__ ( nn.Module ): snake_case__ : int snake_case__ : jnp.dtype = jnp.floataa def SCREAMING_SNAKE_CASE ( self : str ) -> int: a_ : Dict = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[int]: a_ , a_ , a_ , a_ : Union[str, Any] = hidden_states.shape a_ : List[str] = jax.image.resize( SCREAMING_SNAKE_CASE__ , shape=(batch, height * 2, width * 2, channels) , method='nearest' , ) a_ : Any = self.conv(SCREAMING_SNAKE_CASE__ ) return hidden_states class SCREAMING_SNAKE_CASE__ ( nn.Module ): snake_case__ : int snake_case__ : jnp.dtype = jnp.floataa def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: a_ : Optional[int] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self : Any , SCREAMING_SNAKE_CASE__ : int ) -> Optional[int]: # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim # hidden_states = jnp.pad(hidden_states, pad_width=pad) a_ : str = self.conv(SCREAMING_SNAKE_CASE__ ) return hidden_states class SCREAMING_SNAKE_CASE__ ( nn.Module ): snake_case__ : int snake_case__ : int = None snake_case__ : float = 0.0 snake_case__ : bool = None snake_case__ : jnp.dtype = jnp.floataa def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: a_ : List[str] = self.in_channels if self.out_channels is None else self.out_channels a_ : Optional[int] = nn.GroupNorm(num_groups=3_2 , epsilon=1E-5 ) a_ : Any = nn.Conv( SCREAMING_SNAKE_CASE__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) a_ : Optional[int] = nn.Dense(SCREAMING_SNAKE_CASE__ , dtype=self.dtype ) a_ : Union[str, Any] = nn.GroupNorm(num_groups=3_2 , epsilon=1E-5 ) a_ : int = nn.Dropout(self.dropout_prob ) a_ : Optional[Any] = nn.Conv( SCREAMING_SNAKE_CASE__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) a_ : List[str] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut a_ : List[Any] = None if use_nin_shortcut: a_ : Union[str, Any] = nn.Conv( SCREAMING_SNAKE_CASE__ , kernel_size=(1, 1) , strides=(1, 1) , padding='VALID' , dtype=self.dtype , ) def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any]=True ) -> int: a_ : List[Any] = hidden_states a_ : Any = self.norma(SCREAMING_SNAKE_CASE__ ) a_ : Any = nn.swish(SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = self.conva(SCREAMING_SNAKE_CASE__ ) a_ : int = self.time_emb_proj(nn.swish(SCREAMING_SNAKE_CASE__ ) ) a_ : List[str] = jnp.expand_dims(jnp.expand_dims(SCREAMING_SNAKE_CASE__ , 1 ) , 1 ) a_ : Optional[int] = hidden_states + temb a_ : List[str] = self.norma(SCREAMING_SNAKE_CASE__ ) a_ : Tuple = nn.swish(SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = self.dropout(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = self.conva(SCREAMING_SNAKE_CASE__ ) if self.conv_shortcut is not None: a_ : List[str] = self.conv_shortcut(SCREAMING_SNAKE_CASE__ ) return hidden_states + residual
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}


class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
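A hedged usage sketch, not part of the module above: the configuration class mirrors `transformers.DeiTConfig`, so it can be built and inspected like any PretrainedConfig. The overridden value below is an illustrative assumption.

from transformers import DeiTConfig

config = DeiTConfig(image_size=384)  # override a single default for illustration
assert config.hidden_size == 768     # remaining defaults are unchanged
assert config.patch_size == 16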
import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. UpperCAmelCase_ : Dict = {'LayoutLMv2Config', 'LayoutLMv3Config'} @is_pipeline_test class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): snake_case__ : List[str] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING snake_case__ : Optional[Any] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: snake_case__ : str = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: snake_case__ : List[Any] = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: a_ : List[Any] = pipeline( task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' ) a_ : int = text_classifier('This is great !' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}] ) a_ : Tuple = text_classifier('This is great !' , top_k=2 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}] ) a_ : List[str] = text_classifier(['This is great !', 'This is bad'] , top_k=2 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [ [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}], [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}], ] , ) a_ : Tuple = text_classifier('This is great !' , top_k=1 ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}] ) # Legacy behavior a_ : Union[str, Any] = text_classifier('This is great !' , return_all_scores=SCREAMING_SNAKE_CASE__ ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}] ) a_ : List[str] = text_classifier('This is great !' , return_all_scores=SCREAMING_SNAKE_CASE__ ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}]] ) a_ : int = text_classifier(['This is great !', 'Something else'] , return_all_scores=SCREAMING_SNAKE_CASE__ ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [ [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}], [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}], ] , ) a_ : str = text_classifier(['This is great !', 'Something else'] , return_all_scores=SCREAMING_SNAKE_CASE__ ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [ {'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_0', 'score': 0.504}, ] , ) @require_torch def SCREAMING_SNAKE_CASE ( self : int ) -> Dict: import torch a_ : List[Any] = pipeline( task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' , device=torch.device('cpu' ) , ) a_ : Any = text_classifier('This is great !' 
) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}] ) @require_tf def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: a_ : List[str] = pipeline( task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='tf' ) a_ : Optional[int] = text_classifier('This is great !' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}] ) @slow @require_torch def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: a_ : List[str] = pipeline('text-classification' ) a_ : Dict = text_classifier('This is great !' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'POSITIVE', 'score': 1.0}] ) a_ : Union[str, Any] = text_classifier('This is bad !' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'NEGATIVE', 'score': 1.0}] ) a_ : Tuple = text_classifier('Birds are a type of animal' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'POSITIVE', 'score': 0.988}] ) @slow @require_tf def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]: a_ : Dict = pipeline('text-classification' , framework='tf' ) a_ : Optional[Any] = text_classifier('This is great !' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'POSITIVE', 'score': 1.0}] ) a_ : int = text_classifier('This is bad !' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'NEGATIVE', 'score': 1.0}] ) a_ : Optional[int] = text_classifier('Birds are a type of animal' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'POSITIVE', 'score': 0.988}] ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any: a_ : Optional[Any] = TextClassificationPipeline(model=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ ) return text_classifier, ["HuggingFace is in", "This is another test"] def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Union[str, Any]: a_ : List[str] = text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 a_ : Union[str, Any] = 'HuggingFace is in' a_ : int = text_classifier(SCREAMING_SNAKE_CASE__ ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}] ) self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() ) a_ : Union[str, Any] = ['HuggingFace is in ', 'Paris is in France'] a_ : int = text_classifier(SCREAMING_SNAKE_CASE__ ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}, {'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}] , ) self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() ) self.assertTrue(outputs[1]['label'] in model.config.idalabel.values() ) # Forcing to get all results with `top_k=None` # This is NOT the legacy format a_ : List[Any] = text_classifier(SCREAMING_SNAKE_CASE__ , top_k=SCREAMING_SNAKE_CASE__ ) a_ : Dict = len(model.config.idalabel.values() ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [[{'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}] * N, [{'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}] * N] , ) a_ : int = {'text': 
'HuggingFace is in ', 'text_pair': 'Paris is in France'} a_ : Optional[int] = text_classifier(SCREAMING_SNAKE_CASE__ ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , {'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )} , ) self.assertTrue(outputs['label'] in model.config.idalabel.values() ) # This might be used a text pair, but tokenizer + pipe interaction # makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this usage instead as it was outputting wrong outputs. a_ : Any = [['HuggingFace is in ', 'Paris is in France']] with self.assertRaises(SCREAMING_SNAKE_CASE__ ): text_classifier(SCREAMING_SNAKE_CASE__ ) # This used to be valid for doing text pairs # We're keeping it working because of backward compatibility a_ : Tuple = text_classifier([[['HuggingFace is in ', 'Paris is in France']]] ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}] , ) self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin UpperCAmelCase_ : Optional[Any] = False @skip_mps class SCREAMING_SNAKE_CASE__ ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ): snake_case__ : str = StableDiffusionAttendAndExcitePipeline snake_case__ : Tuple = False snake_case__ : Dict = TEXT_TO_IMAGE_PARAMS snake_case__ : str = TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} ) snake_case__ : int = TEXT_TO_IMAGE_IMAGE_PARAMS snake_case__ : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def SCREAMING_SNAKE_CASE ( cls : str ) -> Optional[Any]: super().setUpClass() torch.use_deterministic_algorithms(SCREAMING_SNAKE_CASE__ ) @classmethod def SCREAMING_SNAKE_CASE ( cls : Optional[int] ) -> List[str]: super().tearDownClass() torch.use_deterministic_algorithms(SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]: torch.manual_seed(0 ) a_ : List[str] = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=1 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=SCREAMING_SNAKE_CASE__ , ) a_ : str = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=SCREAMING_SNAKE_CASE__ , set_alpha_to_one=SCREAMING_SNAKE_CASE__ , ) torch.manual_seed(0 ) a_ : Union[str, Any] = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) a_ : Any = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , ) a_ : Optional[int] = CLIPTextModel(SCREAMING_SNAKE_CASE__ ) a_ : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) a_ : Dict = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any=0 ) -> Any: if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ): a_ : Any = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) else: a_ : List[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) a_ : List[str] = { 'prompt': 'a cat and a frog', 'token_indices': [2, 5], 'generator': generator, 'num_inference_steps': 1, 'guidance_scale': 6.0, 'output_type': 'numpy', 'max_iter_to_alter': 2, 'thresholds': {0: 0.7}, } return inputs def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]: a_ : List[str] = 'cpu' a_ : Optional[Any] = 
self.get_dummy_components() a_ : Any = self.pipeline_class(**SCREAMING_SNAKE_CASE__ ) pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : str = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) a_ : str = pipe(**SCREAMING_SNAKE_CASE__ ).images a_ : List[Any] = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 6_4, 6_4, 3) ) a_ : Any = np.array( [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496] ) a_ : Union[str, Any] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(SCREAMING_SNAKE_CASE__ , 1E-3 ) def SCREAMING_SNAKE_CASE ( self : int ) -> int: super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 ) def SCREAMING_SNAKE_CASE ( self : str ) -> int: # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]: self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 ) def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]: super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 ) def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: super().test_save_load_local(expected_max_difference=5E-4 ) def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: super().test_save_load_optional_components(expected_max_difference=4E-4 ) @require_torch_gpu @slow class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @classmethod def SCREAMING_SNAKE_CASE ( cls : Dict ) -> Union[str, Any]: super().setUpClass() torch.use_deterministic_algorithms(SCREAMING_SNAKE_CASE__ ) @classmethod def SCREAMING_SNAKE_CASE ( cls : List[str] ) -> Tuple: super().tearDownClass() torch.use_deterministic_algorithms(SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: a_ : str = torch.manual_seed(5_1 ) a_ : Tuple = StableDiffusionAttendAndExcitePipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , safety_checker=SCREAMING_SNAKE_CASE__ , torch_dtype=torch.floataa ) pipe.to('cuda' ) a_ : Any = 'a painting of an elephant with glasses' a_ : str = [5, 7] a_ : Dict = pipe( prompt=SCREAMING_SNAKE_CASE__ , token_indices=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=5 , max_iter_to_alter=5 , output_type='numpy' , ).images[0] a_ : int = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy' ) assert np.abs((expected_image - image).max() ) < 5E-1
import jax.numpy as jnp

from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids: jnp.array, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """
    Shift input ids one token to the right: prepend the decoder start token and
    replace any ignore-index (-100) that gets copied over with the pad token id.
    """
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
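A small standalone illustration of the shift_tokens_right helper above; the token ids, pad id (0) and decoder start id (2) are made-up values, and only jax is needed to run it.

import jax.numpy as jnp

input_ids = jnp.array([[5, -100, 8]])
shifted = shift_tokens_right(input_ids, pad_token_id=0, decoder_start_token_id=2)
# The row is shifted right, the decoder start token (2) is prepended, and the
# copied ignore-index (-100) is replaced by the pad id (0).
assert shifted.tolist() == [[2, 5, 0]]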
import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py UpperCAmelCase_ : str = 'src/diffusers' UpperCAmelCase_ : Optional[Any] = '.' # This is to make sure the diffusers module imported is the one in the repo. UpperCAmelCase_ : Optional[int] = importlib.util.spec_from_file_location( 'diffusers', os.path.join(DIFFUSERS_PATH, '__init__.py'), submodule_search_locations=[DIFFUSERS_PATH], ) UpperCAmelCase_ : List[str] = spec.loader.load_module() def SCREAMING_SNAKE_CASE_ ( __A : Tuple , __A : Union[str, Any] ) -> Any: """simple docstring""" return line.startswith(__A ) or len(__A ) <= 1 or re.search(R'^\s*\)(\s*->.*:|:)\s*$' , __A ) is not None def SCREAMING_SNAKE_CASE_ ( __A : List[str] ) -> Optional[int]: """simple docstring""" a_ : Dict = object_name.split('.' ) a_ : Dict = 0 # First let's find the module where our object lives. a_ : str = parts[i] while i < len(__A ) and not os.path.isfile(os.path.join(__A , F"""{module}.py""" ) ): i += 1 if i < len(__A ): a_ : Optional[Any] = os.path.join(__A , parts[i] ) if i >= len(__A ): raise ValueError(F"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" ) with open(os.path.join(__A , F"""{module}.py""" ) , 'r' , encoding='utf-8' , newline='\n' ) as f: a_ : int = f.readlines() # Now let's find the class / func in the code! a_ : Any = '' a_ : Union[str, Any] = 0 for name in parts[i + 1 :]: while ( line_index < len(__A ) and re.search(RF"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(__A ): raise ValueError(F""" {object_name} does not match any function or class in {module}.""" ) # We found the beginning of the class / func, now let's find the end (when the indent diminishes). a_ : Union[str, Any] = line_index while line_index < len(__A ) and _should_continue(lines[line_index] , __A ): line_index += 1 # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 a_ : str = lines[start_index:line_index] return "".join(__A ) UpperCAmelCase_ : Union[str, Any] = re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)') UpperCAmelCase_ : Any = re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)') UpperCAmelCase_ : List[Any] = re.compile(R'<FILL\s+[^>]*>') def SCREAMING_SNAKE_CASE_ ( __A : Optional[int] ) -> Optional[int]: """simple docstring""" a_ : str = code.split('\n' ) a_ : int = 0 while idx < len(__A ) and len(lines[idx] ) == 0: idx += 1 if idx < len(__A ): return re.search(R'^(\s*)\S' , lines[idx] ).groups()[0] return "" def SCREAMING_SNAKE_CASE_ ( __A : int ) -> List[str]: """simple docstring""" a_ : Tuple = len(get_indent(__A ) ) > 0 if has_indent: a_ : Any = F"""class Bla:\n{code}""" a_ : int = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 , preview=__A ) a_ : List[Any] = black.format_str(__A , mode=__A ) a_ , a_ : int = style_docstrings_in_code(__A ) return result[len('class Bla:\n' ) :] if has_indent else result def SCREAMING_SNAKE_CASE_ ( __A : List[Any] , __A : List[Any]=False ) -> int: """simple docstring""" with open(__A , 'r' , encoding='utf-8' , newline='\n' ) as f: a_ : Optional[int] = f.readlines() a_ : List[Any] = [] a_ : int = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). 
while line_index < len(__A ): a_ : List[Any] = _re_copy_warning.search(lines[line_index] ) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. a_ , a_ , a_ : int = search.groups() a_ : Any = find_code_in_diffusers(__A ) a_ : List[Any] = get_indent(__A ) a_ : Optional[Any] = line_index + 1 if indent == theoretical_indent else line_index + 2 a_ : Union[str, Any] = theoretical_indent a_ : Dict = start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. a_ : List[str] = True while line_index < len(__A ) and should_continue: line_index += 1 if line_index >= len(__A ): break a_ : List[str] = lines[line_index] a_ : Any = _should_continue(__A , __A ) and re.search(F"""^{indent}# End copy""" , __A ) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 a_ : int = lines[start_index:line_index] a_ : Union[str, Any] = ''.join(__A ) # Remove any nested `Copied from` comments to avoid circular copies a_ : Tuple = [line for line in theoretical_code.split('\n' ) if _re_copy_warning.search(__A ) is None] a_ : int = '\n'.join(__A ) # Before comparing, use the `replace_pattern` on the original code. if len(__A ) > 0: a_ : List[str] = replace_pattern.replace('with' , '' ).split(',' ) a_ : Any = [_re_replace_pattern.search(__A ) for p in patterns] for pattern in patterns: if pattern is None: continue a_ , a_ , a_ : Any = pattern.groups() a_ : List[Any] = re.sub(__A , __A , __A ) if option.strip() == "all-casing": a_ : List[str] = re.sub(obja.lower() , obja.lower() , __A ) a_ : Dict = re.sub(obja.upper() , obja.upper() , __A ) # Blackify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line a_ : Dict = blackify(lines[start_index - 1] + theoretical_code ) a_ : Optional[int] = theoretical_code[len(lines[start_index - 1] ) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index] ) if overwrite: a_ : Tuple = lines[:start_index] + [theoretical_code] + lines[line_index:] a_ : Dict = start_index + 1 if overwrite and len(__A ) > 0: # Warn the user a file has been modified. print(F"""Detected changes, rewriting {filename}.""" ) with open(__A , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(__A ) return diffs def SCREAMING_SNAKE_CASE_ ( __A : bool = False ) -> Optional[Any]: """simple docstring""" a_ : int = glob.glob(os.path.join(__A , '**/*.py' ) , recursive=__A ) a_ : List[str] = [] for filename in all_files: a_ : Union[str, Any] = is_copy_consistent(__A , __A ) diffs += [F"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs] if not overwrite and len(__A ) > 0: a_ : Dict = '\n'.join(__A ) raise Exception( 'Found the following copy inconsistencies:\n' + diff + '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.' ) if __name__ == "__main__": UpperCAmelCase_ : List[str] = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') UpperCAmelCase_ : List[str] = parser.parse_args() check_copies(args.fix_and_overwrite)
from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent UpperCAmelCase_ : Any = {'UserAgent': UserAgent().random} def SCREAMING_SNAKE_CASE_ ( __A : Optional[int] ) -> dict: """simple docstring""" a_ : Tuple = script.contents[0] a_ : int = json.loads(data[data.find('{"config"' ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class SCREAMING_SNAKE_CASE__ : def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[Any]: a_ : Tuple = F"""https://www.instagram.com/{username}/""" a_ : Optional[Any] = self.get_json() def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> dict: a_ : Any = requests.get(self.url , headers=SCREAMING_SNAKE_CASE__ ).text a_ : Dict = BeautifulSoup(SCREAMING_SNAKE_CASE__ , 'html.parser' ).find_all('script' ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self : Union[str, Any] ) -> str: return F"""{self.__class__.__name__}('{self.username}')""" def __str__( self : Optional[int] ) -> str: return F"""{self.fullname} ({self.username}) is {self.biography}""" @property def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str: return self.user_data["username"] @property def SCREAMING_SNAKE_CASE ( self : str ) -> str: return self.user_data["full_name"] @property def SCREAMING_SNAKE_CASE ( self : Any ) -> str: return self.user_data["biography"] @property def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: return self.user_data["business_email"] @property def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: return self.user_data["external_url"] @property def SCREAMING_SNAKE_CASE ( self : Dict ) -> int: return self.user_data["edge_followed_by"]["count"] @property def SCREAMING_SNAKE_CASE ( self : Any ) -> int: return self.user_data["edge_follow"]["count"] @property def SCREAMING_SNAKE_CASE ( self : str ) -> int: return self.user_data["edge_owner_to_timeline_media"]["count"] @property def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: return self.user_data["profile_pic_url_hd"] @property def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> bool: return self.user_data["is_verified"] @property def SCREAMING_SNAKE_CASE ( self : Any ) -> bool: return self.user_data["is_private"] def SCREAMING_SNAKE_CASE_ ( __A : str = "github" ) -> None: """simple docstring""" import os if os.environ.get('CI' ): return # test failing on GitHub Actions a_ : int = InstagramUser(__A ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , __A ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." assert instagram_user.number_of_posts > 1_50 assert instagram_user.number_of_followers > 12_00_00 assert instagram_user.number_of_followings > 15 assert instagram_user.email == "support@github.com" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith('https://instagram.' 
) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase_ : Union[str, Any] = InstagramUser('github') print(instagram_user) print(F'{instagram_user.number_of_posts = }') print(F'{instagram_user.number_of_followers = }') print(F'{instagram_user.number_of_followings = }') print(F'{instagram_user.email = }') print(F'{instagram_user.website = }') print(F'{instagram_user.profile_picture_url = }') print(F'{instagram_user.is_verified = }') print(F'{instagram_user.is_private = }')
from __future__ import annotations

import requests

valid_terms = set(
    """approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)


def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """
    Fetch `limit` posts from the subreddit's `age` listing (e.g. "new"), optionally
    restricted to the fields named in `wanted_data`.
    """
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict


if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time.
    print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
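A hedged retry sketch, not part of the original file: get_subreddit_data raises requests.HTTPError on a 429 response, so a caller can back off and retry. The attempt count and sleep intervals below are illustrative choices.

import time

import requests

posts = {}
for attempt in range(3):
    try:
        posts = get_subreddit_data("learnpython", limit=2, wanted_data=["title", "url"])
        break
    except requests.HTTPError:
        time.sleep(2 ** attempt)  # simple exponential backoff before retrying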
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Tuple = ['''image_processor''', '''tokenizer'''] snake_case__ : Union[str, Any] = '''CLIPImageProcessor''' snake_case__ : Dict = ('''CLIPTokenizer''', '''CLIPTokenizerFast''') def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : List[str]=None , **SCREAMING_SNAKE_CASE__ : int ) -> Any: a_ : List[Any] = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , SCREAMING_SNAKE_CASE__ , ) a_ : Tuple = kwargs.pop('feature_extractor' ) a_ : Tuple = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __call__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , **SCREAMING_SNAKE_CASE__ : str ) -> Optional[Any]: if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be none.' ) if text is not None: a_ : List[str] = self.tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) if images is not None: a_ : Dict = self.image_processor(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) if text is not None and images is not None: a_ : Dict = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE__ ) , tensor_type=SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Any , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[Any]: return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[Any]: return self.tokenizer.decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) @property def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: a_ : str = self.tokenizer.model_input_names a_ : Tuple = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def SCREAMING_SNAKE_CASE ( self : str ) -> str: warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , SCREAMING_SNAKE_CASE__ , ) return self.image_processor_class @property def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , SCREAMING_SNAKE_CASE__ , ) return self.image_processor
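The module above is the CLIP processor that wraps a CLIP tokenizer and image processor behind one `__call__`. A hedged usage sketch with the released `transformers` API follows; the checkpoint name is the standard public one, and the image URL is the COCO sample already used in the GroupViT conversion script earlier in this document.

import requests
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(
    text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt"
)
# `inputs` is a BatchEncoding holding the tokenizer outputs (input_ids,
# attention_mask) alongside the image processor's pixel_values.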
import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Dict = (DDPMScheduler,) def SCREAMING_SNAKE_CASE ( self : Tuple , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Tuple: a_ : str = { 'num_train_timesteps': 1_0_0_0, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'variance_type': 'fixed_small', 'clip_sample': True, } config.update(**SCREAMING_SNAKE_CASE__ ) return config def SCREAMING_SNAKE_CASE ( self : int ) -> Tuple: for timesteps in [1, 5, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE__ , beta_end=SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: for clip_sample in [True, False]: self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]: self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE__ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=SCREAMING_SNAKE_CASE__ , prediction_type=SCREAMING_SNAKE_CASE__ , sample_max_value=SCREAMING_SNAKE_CASE__ , ) def SCREAMING_SNAKE_CASE ( self : int ) -> Any: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Dict ) -> int: for t in [0, 5_0_0, 9_9_9]: self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str: a_ : Optional[Any] = self.scheduler_classes[0] a_ : Tuple = self.get_scheduler_config() a_ : Any = scheduler_class(**SCREAMING_SNAKE_CASE__ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.00979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1E-5 def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]: a_ : Optional[int] = self.scheduler_classes[0] a_ : str = self.get_scheduler_config() a_ : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE__ ) a_ : List[str] = len(SCREAMING_SNAKE_CASE__ ) a_ : List[str] = self.dummy_model() a_ : List[str] = self.dummy_sample_deter a_ : str = torch.manual_seed(0 ) for t in reversed(range(SCREAMING_SNAKE_CASE__ ) ): # 1. predict noise residual a_ : Optional[int] = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # 2. 
predict previous mean of sample x_t-1 a_ : int = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance a_ : List[Any] = pred_prev_sample a_ : List[Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) ) a_ : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) ) assert abs(result_sum.item() - 258.9606 ) < 1E-2 assert abs(result_mean.item() - 0.3372 ) < 1E-3 def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]: a_ : List[str] = self.scheduler_classes[0] a_ : Dict = self.get_scheduler_config(prediction_type='v_prediction' ) a_ : Union[str, Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ ) a_ : str = len(SCREAMING_SNAKE_CASE__ ) a_ : Optional[Any] = self.dummy_model() a_ : Any = self.dummy_sample_deter a_ : Any = torch.manual_seed(0 ) for t in reversed(range(SCREAMING_SNAKE_CASE__ ) ): # 1. predict noise residual a_ : Optional[Any] = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # 2. predict previous mean of sample x_t-1 a_ : Union[str, Any] = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance a_ : Optional[Any] = pred_prev_sample a_ : List[str] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) ) a_ : Dict = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) ) assert abs(result_sum.item() - 202.0296 ) < 1E-2 assert abs(result_mean.item() - 0.2631 ) < 1E-3 def SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple: a_ : Union[str, Any] = self.scheduler_classes[0] a_ : Optional[Any] = self.get_scheduler_config() a_ : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE__ ) a_ : Any = [1_0_0, 8_7, 5_0, 1, 0] scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE__ ) a_ : Any = scheduler.timesteps for i, timestep in enumerate(SCREAMING_SNAKE_CASE__ ): if i == len(SCREAMING_SNAKE_CASE__ ) - 1: a_ : Union[str, Any] = -1 else: a_ : Union[str, Any] = timesteps[i + 1] a_ : str = scheduler.previous_timestep(SCREAMING_SNAKE_CASE__ ) a_ : Any = prev_t.item() self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]: a_ : List[Any] = self.scheduler_classes[0] a_ : Optional[int] = self.get_scheduler_config() a_ : Any = scheduler_class(**SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = [1_0_0, 8_7, 5_0, 5_1, 0] with self.assertRaises(SCREAMING_SNAKE_CASE__ , msg='`custom_timesteps` must be in descending order.' ): scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int: a_ : List[Any] = self.scheduler_classes[0] a_ : int = self.get_scheduler_config() a_ : int = scheduler_class(**SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = [1_0_0, 8_7, 5_0, 1, 0] a_ : Optional[Any] = len(SCREAMING_SNAKE_CASE__ ) with self.assertRaises(SCREAMING_SNAKE_CASE__ , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' 
): scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE__ , timesteps=SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : str ) -> str: a_ : List[str] = self.scheduler_classes[0] a_ : List[Any] = self.get_scheduler_config() a_ : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE__ ) a_ : Optional[Any] = [scheduler.config.num_train_timesteps] with self.assertRaises( SCREAMING_SNAKE_CASE__ , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ): scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE__ )
from __future__ import annotations

solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """
    Return True if a queen placed at board[row][column] is not attacked by any
    queen already placed in the rows above.
    """
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # Upper-left diagonal
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    # Upper-right diagonal
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """
    Place queens row by row via backtracking, printing and recording every
    complete placement.
    """
    if row >= len(board):
        # Store a copy, not the board itself, so later backtracking does not
        # erase the recorded solution.
        solution.append([line[:] for line in board])
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """
    Print the board, marking queens with 'Q' and empty squares with '.'.
    """
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
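A small sanity check added here for illustration (the original script only runs the 8x8 case): the 4x4 puzzle is known to have exactly two solutions.

small_board = [[0 for _ in range(4)] for _ in range(4)]
solution.clear()        # reuse the module-level accumulator from above
solve(small_board, 0)   # prints both placements
assert len(solution) == 2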
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging UpperCAmelCase_ : Dict = logging.get_logger(__name__) if is_vision_available(): import PIL class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Union[str, Any] = ['''pixel_values'''] def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Union[int, float] = 1 / 2_5_5 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : bool = True , **SCREAMING_SNAKE_CASE__ : Tuple , ) -> None: super().__init__(**SCREAMING_SNAKE_CASE__ ) a_ : int = size if size is not None else {'shortest_edge': 2_2_4} a_ : Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ ) a_ : Optional[Any] = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4} a_ : Optional[int] = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ , param_name='crop_size' ) a_ : Optional[int] = do_resize a_ : Dict = size a_ : Union[str, Any] = resample a_ : Optional[int] = do_center_crop a_ : Optional[int] = crop_size a_ : Optional[int] = do_rescale a_ : List[str] = rescale_factor a_ : Optional[Any] = do_normalize a_ : Dict = image_mean if image_mean is not None else OPENAI_CLIP_MEAN a_ : Union[str, Any] = image_std if image_std is not None else OPENAI_CLIP_STD a_ : Tuple = do_convert_rgb def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Dict[str, int] , SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : List[Any] , ) -> np.ndarray: a_ : Dict = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) a_ : Dict = get_resize_output_image_size(SCREAMING_SNAKE_CASE__ , size=size['shortest_edge'] , default_to_square=SCREAMING_SNAKE_CASE__ ) return resize(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Dict[str, int] , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Tuple , ) -> np.ndarray: a_ : Any = get_size_dict(SCREAMING_SNAKE_CASE__ ) if "height" not in size or "width" not in size: raise ValueError(F"""The `size` parameter must contain the keys (height, width). 
Got {size.keys()}""" ) return center_crop(SCREAMING_SNAKE_CASE__ , size=(size['height'], size['width']) , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Union[int, float] , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> Optional[int]: return rescale(SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Union[float, List[float]] , SCREAMING_SNAKE_CASE__ : Union[float, List[float]] , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> np.ndarray: return normalize(SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : ImageInput , SCREAMING_SNAKE_CASE__ : bool = None , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : PILImageResampling = None , SCREAMING_SNAKE_CASE__ : bool = None , SCREAMING_SNAKE_CASE__ : int = None , SCREAMING_SNAKE_CASE__ : bool = None , SCREAMING_SNAKE_CASE__ : float = None , SCREAMING_SNAKE_CASE__ : bool = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : bool = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE__ : Optional[ChannelDimension] = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ : int , ) -> PIL.Image.Image: a_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize a_ : Optional[int] = size if size is not None else self.size a_ : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE__ , param_name='size' , default_to_square=SCREAMING_SNAKE_CASE__ ) a_ : int = resample if resample is not None else self.resample a_ : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop a_ : Tuple = crop_size if crop_size is not None else self.crop_size a_ : str = get_size_dict(SCREAMING_SNAKE_CASE__ , param_name='crop_size' , default_to_square=SCREAMING_SNAKE_CASE__ ) a_ : Any = do_rescale if do_rescale is not None else self.do_rescale a_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor a_ : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize a_ : List[Any] = image_mean if image_mean is not None else self.image_mean a_ : int = image_std if image_std is not None else self.image_std a_ : List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb a_ : List[str] = make_list_of_images(SCREAMING_SNAKE_CASE__ ) if not valid_images(SCREAMING_SNAKE_CASE__ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' 
) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # PIL RGBA images are converted to RGB if do_convert_rgb: a_ : Any = [convert_to_rgb(SCREAMING_SNAKE_CASE__ ) for image in images] # All transformations expect numpy arrays. a_ : Optional[Any] = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for image in images] if do_resize: a_ : Dict = [self.resize(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ ) for image in images] if do_center_crop: a_ : str = [self.center_crop(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ ) for image in images] if do_rescale: a_ : int = [self.rescale(image=SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ ) for image in images] if do_normalize: a_ : List[Any] = [self.normalize(image=SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ ) for image in images] a_ : Dict = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images] a_ : List[Any] = {'pixel_values': images} return BatchFeature(data=SCREAMING_SNAKE_CASE__ , tensor_type=SCREAMING_SNAKE_CASE__ )
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        # Older-style "--no_xxx" flags were replaced by "--no-xxx"; turn the parser
        # error into an actionable message for the user.
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
            raise ValueError(full_error_msg)

    benchmark = TensorFlowBenchmark(args=benchmark_args)
    benchmark.run()


if __name__ == "__main__":
    main()
32
1
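A note on the preprocessing code in the row above: its preprocess method chains resize, center-crop, rescale, and normalize before returning channels-first pixel values in a BatchFeature. Below is a rough NumPy-only sketch of just the rescale/normalize/transpose steps; the image size and the 0.5 mean/std values are illustrative assumptions, not values taken from the entry.

import numpy as np

# Toy stand-in for a decoded RGB image in (height, width, channels) layout.
image = np.random.randint(0, 256, size=(224, 224, 3), dtype=np.uint8)

# Rescale: map pixel values from [0, 255] to [0, 1].
rescaled = image.astype(np.float32) * (1.0 / 255.0)

# Normalize per channel: (x - mean) / std, with illustrative mean/std values.
mean = np.array([0.5, 0.5, 0.5], dtype=np.float32)
std = np.array([0.5, 0.5, 0.5], dtype=np.float32)
normalized = (rescaled - mean) / std

# Channels-first layout, matching a ChannelDimension.FIRST-style output.
pixel_values = np.transpose(normalized, (2, 0, 1))
print(pixel_values.shape)  # (3, 224, 224)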
from __future__ import annotations import typing from collections.abc import Iterable import numpy as np UpperCAmelCase_ : Union[str, Any] = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007 UpperCAmelCase_ : str = typing.Union[np.floataa, int, float] # noqa: UP007 def SCREAMING_SNAKE_CASE_ ( __A : Vector , __A : Vector ) -> VectorOut: """simple docstring""" return np.sqrt(np.sum((np.asarray(__A ) - np.asarray(__A )) ** 2 ) ) def SCREAMING_SNAKE_CASE_ ( __A : Vector , __A : Vector ) -> VectorOut: """simple docstring""" return sum((va - va) ** 2 for va, va in zip(__A , __A ) ) ** (1 / 2) if __name__ == "__main__": def SCREAMING_SNAKE_CASE_ ( ) -> None: """simple docstring""" from timeit import timeit print('Without Numpy' ) print( timeit( 'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=1_00_00 , globals=globals() , ) ) print('With Numpy' ) print( timeit( 'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=1_00_00 , globals=globals() , ) ) benchmark()
32
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class SCREAMING_SNAKE_CASE__ ( lowercase__ , unittest.TestCase ): snake_case__ : Optional[Any] = TextToVideoSDPipeline snake_case__ : Optional[int] = TEXT_TO_IMAGE_PARAMS snake_case__ : str = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. snake_case__ : Optional[Any] = frozenset( [ '''num_inference_steps''', '''generator''', '''latents''', '''return_dict''', '''callback''', '''callback_steps''', ] ) def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: torch.manual_seed(0 ) a_ : Optional[int] = UNetaDConditionModel( block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=3_2 , attention_head_dim=4 , ) a_ : int = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=SCREAMING_SNAKE_CASE__ , set_alpha_to_one=SCREAMING_SNAKE_CASE__ , ) torch.manual_seed(0 ) a_ : int = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) a_ : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , ) a_ : Dict = CLIPTextModel(SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) a_ : Union[str, Any] = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, } return components def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any]=0 ) -> List[str]: if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ): a_ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) else: a_ : Optional[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) a_ : int = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'pt', } return inputs def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: a_ : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator a_ : Dict = self.get_dummy_components() a_ : str = TextToVideoSDPipeline(**SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = sd_pipe.to(SCREAMING_SNAKE_CASE__ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Tuple = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) a_ : Dict = 'np' a_ : Dict = sd_pipe(**SCREAMING_SNAKE_CASE__ ).frames a_ : int = 
frames[0][-3:, -3:, -1] assert frames[0].shape == (6_4, 6_4, 3) a_ : Union[str, Any] = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: self._test_attention_slicing_forward_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE__ , expected_max_diff=3E-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def SCREAMING_SNAKE_CASE ( self : Any ) -> str: self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE__ , expected_max_diff=1E-2 ) @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: pass @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]: pass @unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' ) def SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]: pass def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: return super().test_progress_bar() @slow @skip_mps class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: a_ : str = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy' ) a_ : Any = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' ) a_ : Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) a_ : Optional[Any] = pipe.to('cuda' ) a_ : Any = 'Spiderman is surfing' a_ : List[Any] = torch.Generator(device='cpu' ).manual_seed(0 ) a_ : Optional[Any] = pipe(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2_5 , output_type='pt' ).frames a_ : str = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5E-2 def SCREAMING_SNAKE_CASE ( self : Any ) -> Any: a_ : Dict = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy' ) a_ : Tuple = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' ) a_ : Tuple = pipe.to('cuda' ) a_ : Any = 'Spiderman is surfing' a_ : List[str] = torch.Generator(device='cpu' ).manual_seed(0 ) a_ : List[Any] = pipe(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type='pt' ).frames a_ : List[str] = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5E-2
32
1
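The distance entry in the row above implements the same Euclidean metric twice, once with NumPy and once in pure Python. A small self-contained check (input vectors chosen arbitrarily) that the two formulations agree:

import numpy as np

def euclidean_np(a, b):
    # NumPy formulation: sqrt of the sum of squared differences.
    return float(np.sqrt(np.sum((np.asarray(a) - np.asarray(b)) ** 2)))

def euclidean_pure(a, b):
    # Pure-Python formulation of the same quantity.
    return sum((x - y) ** 2 for x, y in zip(a, b)) ** 0.5

a, b = [1, 2, 3], [4, 5, 6]
assert abs(euclidean_np(a, b) - euclidean_pure(a, b)) < 1e-12
print(euclidean_np(a, b))  # 5.196152..., i.e. sqrt(27)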
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class SCREAMING_SNAKE_CASE__ : def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict=2 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : int=1_0 , SCREAMING_SNAKE_CASE__ : List[Any]=3 , SCREAMING_SNAKE_CASE__ : Optional[Any]=3_2 * 8 , SCREAMING_SNAKE_CASE__ : Dict=3_2 * 8 , SCREAMING_SNAKE_CASE__ : List[str]=4 , SCREAMING_SNAKE_CASE__ : Optional[Any]=6_4 , ) -> str: a_ : Optional[Any] = parent a_ : List[Any] = batch_size a_ : List[Any] = is_training a_ : str = use_auxiliary_loss a_ : str = num_queries a_ : str = num_channels a_ : Union[str, Any] = min_size a_ : Union[str, Any] = max_size a_ : int = num_labels a_ : List[Any] = hidden_dim a_ : int = hidden_dim def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int: a_ : Any = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( SCREAMING_SNAKE_CASE__ ) a_ : int = torch.ones([self.batch_size, self.min_size, self.max_size] , device=SCREAMING_SNAKE_CASE__ ) a_ : Any = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=SCREAMING_SNAKE_CASE__ ) > 0.5 ).float() a_ : int = (torch.rand((self.batch_size, self.num_labels) , device=SCREAMING_SNAKE_CASE__ ) > 0.5).long() a_ : Any = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]: a_ : Any = MaskaFormerConfig( hidden_size=self.hidden_dim , ) a_ : Tuple = self.num_queries a_ : Tuple = self.num_labels a_ : Optional[int] = [1, 1, 1, 1] a_ : Union[str, Any] = self.num_channels a_ : str = 6_4 a_ : List[Any] = 1_2_8 a_ : Tuple = self.hidden_dim a_ : Dict = self.hidden_dim a_ : Optional[Any] = self.hidden_dim return config def SCREAMING_SNAKE_CASE ( self : Any ) -> str: a_ , a_ , a_ , a_ , a_ : List[str] = self.prepare_config_and_inputs() a_ : Dict = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask} return config, inputs_dict def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int ) -> Optional[Any]: a_ : List[str] = output.encoder_hidden_states a_ : Any = output.pixel_decoder_hidden_states a_ : List[Any] = output.transformer_decoder_hidden_states self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) , config.decoder_layers ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str]=False ) -> Optional[int]: with 
torch.no_grad(): a_ : int = MaskaFormerModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : Dict = model(pixel_values=SCREAMING_SNAKE_CASE__ , pixel_mask=SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict ) -> str: a_ : Dict = MaskaFormerForUniversalSegmentation(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() def comm_check_on_output(SCREAMING_SNAKE_CASE__ : List[Any] ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): a_ : str = model(pixel_values=SCREAMING_SNAKE_CASE__ , pixel_mask=SCREAMING_SNAKE_CASE__ ) a_ : str = model(SCREAMING_SNAKE_CASE__ ) comm_check_on_output(SCREAMING_SNAKE_CASE__ ) a_ : Tuple = model( pixel_values=SCREAMING_SNAKE_CASE__ , pixel_mask=SCREAMING_SNAKE_CASE__ , mask_labels=SCREAMING_SNAKE_CASE__ , class_labels=SCREAMING_SNAKE_CASE__ ) comm_check_on_output(SCREAMING_SNAKE_CASE__ ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class SCREAMING_SNAKE_CASE__ ( lowercase__ , lowercase__ , unittest.TestCase ): snake_case__ : Optional[int] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () snake_case__ : List[Any] = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {} snake_case__ : Dict = False snake_case__ : Optional[int] = False snake_case__ : Tuple = False snake_case__ : Union[str, Any] = False def SCREAMING_SNAKE_CASE ( self : str ) -> str: a_ : Optional[Any] = MaskaFormerModelTester(self ) a_ : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]: self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]: a_ , a_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple: a_ : List[Any] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*SCREAMING_SNAKE_CASE__ ) @unittest.skip(reason='Mask2Former does not use inputs_embeds' ) def SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple: pass @unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: pass @unittest.skip(reason='Mask2Former is not a generative model' ) def SCREAMING_SNAKE_CASE ( self : Any ) -> int: pass @unittest.skip(reason='Mask2Former does not use token embeddings' ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict: pass @require_torch_multi_gpu @unittest.skip( reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' ) def SCREAMING_SNAKE_CASE ( self : int ) -> Tuple: pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]: pass def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]: a_ , a_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ : Dict = model_class(SCREAMING_SNAKE_CASE__ ) a_ : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a_ : Optional[Any] = [*signature.parameters.keys()] a_ : List[str] = ['pixel_values'] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ ) @slow def SCREAMING_SNAKE_CASE ( self : int ) -> str: for model_name in ["facebook/mask2former-swin-small-coco-instance"]: a_ : Any = MaskaFormerModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str: a_ : List[str] = (self.model_tester.min_size,) * 2 a_ : Dict = { 'pixel_values': torch.randn((2, 3, *size) , device=SCREAMING_SNAKE_CASE__ ), 'mask_labels': torch.randn((2, 1_0, *size) , device=SCREAMING_SNAKE_CASE__ ), 'class_labels': torch.zeros(2 , 1_0 , device=SCREAMING_SNAKE_CASE__ ).long(), } a_ : Optional[int] = self.model_tester.get_config() a_ : int = MaskaFormerForUniversalSegmentation(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ) a_ : Tuple = model(**SCREAMING_SNAKE_CASE__ ) self.assertTrue(outputs.loss is not None ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: a_ , a_ : int = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple: a_ , a_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ : Any = model_class(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ) a_ : str = model(**SCREAMING_SNAKE_CASE__ , output_attentions=SCREAMING_SNAKE_CASE__ ) self.assertTrue(outputs.attentions is not None ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict: if not self.model_tester.is_training: return a_ : Optional[int] = self.all_model_classes[1] a_ , a_ , a_ , a_ , a_ : int = self.model_tester.prepare_config_and_inputs() a_ : int = model_class(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.train() a_ : List[str] = model(SCREAMING_SNAKE_CASE__ , mask_labels=SCREAMING_SNAKE_CASE__ , class_labels=SCREAMING_SNAKE_CASE__ ).loss loss.backward() def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: a_ : Dict = 
self.all_model_classes[1] a_ , a_ , a_ , a_ , a_ : Tuple = self.model_tester.prepare_config_and_inputs() a_ : Dict = True a_ : Optional[int] = True a_ : List[Any] = model_class(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ) model.train() a_ : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ , mask_labels=SCREAMING_SNAKE_CASE__ , class_labels=SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() a_ : Dict = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() a_ : Union[str, Any] = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() a_ : List[str] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) UpperCAmelCase_ : Dict = 1e-4 def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]: """simple docstring""" a_ : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_vision @slow class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: return "facebook/mask2former-swin-small-coco-instance" @cached_property def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: a_ : Tuple = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(SCREAMING_SNAKE_CASE__ ) a_ : Any = self.default_image_processor a_ : Union[str, Any] = prepare_img() a_ : List[Any] = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).to(SCREAMING_SNAKE_CASE__ ) a_ : List[str] = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(SCREAMING_SNAKE_CASE__ , (1, 3, 3_8_4, 3_8_4) ) with torch.no_grad(): a_ : Union[str, Any] = model(**SCREAMING_SNAKE_CASE__ ) a_ : Optional[Any] = torch.tensor( [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(SCREAMING_SNAKE_CASE__ ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=SCREAMING_SNAKE_CASE__ ) ) a_ : str = torch.tensor( [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(SCREAMING_SNAKE_CASE__ ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=SCREAMING_SNAKE_CASE__ ) ) a_ : str = torch.tensor( [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(SCREAMING_SNAKE_CASE__ ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=SCREAMING_SNAKE_CASE__ ) ) def SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: a_ : str = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(SCREAMING_SNAKE_CASE__ ).eval() a_ : List[Any] = self.default_image_processor a_ : Tuple = prepare_img() a_ : Optional[int] = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).to(SCREAMING_SNAKE_CASE__ ) a_ : Optional[Any] = inputs['pixel_values'].shape # check size is divisible by 32 
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(SCREAMING_SNAKE_CASE__ , (1, 3, 3_8_4, 3_8_4) ) with torch.no_grad(): a_ : Union[str, Any] = model(**SCREAMING_SNAKE_CASE__ ) # masks_queries_logits a_ : Dict = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) a_ : List[str] = [ [-8.7839, -9.0056, -8.8121], [-7.4104, -7.0313, -6.5401], [-6.6105, -6.3427, -6.4675], ] a_ : Optional[Any] = torch.tensor(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=SCREAMING_SNAKE_CASE__ ) ) # class_queries_logits a_ : Tuple = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) ) a_ : Optional[int] = torch.tensor( [ [1.8324, -8.0835, -4.1922], [0.8450, -9.0050, -3.6053], [0.3045, -7.7293, -3.0275], ] ).to(SCREAMING_SNAKE_CASE__ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=SCREAMING_SNAKE_CASE__ ) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: a_ : Tuple = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(SCREAMING_SNAKE_CASE__ ).eval() a_ : List[str] = self.default_image_processor a_ : List[str] = image_processor( [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='pt' , ) a_ : Any = inputs['pixel_values'].to(SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = [el.to(SCREAMING_SNAKE_CASE__ ) for el in inputs['mask_labels']] a_ : str = [el.to(SCREAMING_SNAKE_CASE__ ) for el in inputs['class_labels']] with torch.no_grad(): a_ : Tuple = model(**SCREAMING_SNAKE_CASE__ ) self.assertTrue(outputs.loss is not None )
32
import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class SCREAMING_SNAKE_CASE__ ( lowercase__ , unittest.TestCase ): # TODO: is there an appropriate internal test set? snake_case__ : Any = '''ssube/stable-diffusion-x4-upscaler-onnx''' def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : int=0 ) -> Tuple: a_ : Union[str, Any] = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ) a_ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict: a_ : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = self.get_dummy_inputs() a_ : int = pipe(**SCREAMING_SNAKE_CASE__ ).images a_ : Tuple = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 5_1_2, 5_1_2, 3) a_ : List[Any] = np.array( [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: a_ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) a_ : int = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : List[str] = self.get_dummy_inputs() a_ : List[str] = pipe(**SCREAMING_SNAKE_CASE__ ).images a_ : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) a_ : str = np.array( [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: a_ : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) a_ : Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = self.get_dummy_inputs() a_ : Dict = pipe(**SCREAMING_SNAKE_CASE__ ).images a_ : str = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) a_ : Optional[Any] = np.array( [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: a_ : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) a_ : int = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : 
Optional[Any] = self.get_dummy_inputs() a_ : Dict = pipe(**SCREAMING_SNAKE_CASE__ ).images a_ : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) a_ : int = np.array( [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: a_ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) a_ : Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = self.get_dummy_inputs() a_ : List[str] = pipe(**SCREAMING_SNAKE_CASE__ ).images a_ : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) a_ : Union[str, Any] = np.array( [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @property def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: a_ : List[str] = ort.SessionOptions() a_ : int = False return options def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple: a_ : str = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) a_ : int = init_image.resize((1_2_8, 1_2_8) ) # using the PNDM scheduler by default a_ : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained( 'ssube/stable-diffusion-x4-upscaler-onnx' , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Tuple = 'A fantasy landscape, trending on artstation' a_ : str = torch.manual_seed(0 ) a_ : List[str] = pipe( prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , num_inference_steps=1_0 , generator=SCREAMING_SNAKE_CASE__ , output_type='np' , ) a_ : Dict = output.images a_ : Any = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) a_ : str = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]: a_ : Dict = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) a_ : List[str] = init_image.resize((1_2_8, 1_2_8) ) a_ : Dict = LMSDiscreteScheduler.from_pretrained( 'ssube/stable-diffusion-x4-upscaler-onnx' , subfolder='scheduler' ) a_ : Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained( 'ssube/stable-diffusion-x4-upscaler-onnx' , scheduler=SCREAMING_SNAKE_CASE__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Any = 'A fantasy landscape, trending on artstation' a_ : Tuple = torch.manual_seed(0 ) a_ : Optional[Any] = pipe( prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , 
guidance_scale=7.5 , num_inference_steps=2_0 , generator=SCREAMING_SNAKE_CASE__ , output_type='np' , ) a_ : str = output.images a_ : List[Any] = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) a_ : Tuple = np.array( [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
32
1
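Both test entries in the row above follow the same pattern: run the model or pipeline on tiny random inputs, take a small slice of the output, and compare it against a hard-coded reference within a tolerance. A minimal sketch of that comparison step; the shapes, values, and tolerance here are illustrative, not taken from the tests.

import numpy as np

def assert_slice_close(output, expected_slice, atol=1e-2):
    """Compare the bottom-right 3x3 corner of the last channel against a reference slice."""
    image_slice = output[-3:, -3:, -1].flatten()
    max_diff = float(np.abs(image_slice - expected_slice).max())
    assert max_diff < atol, f"max difference {max_diff} exceeds tolerance {atol}"

# Illustrative 64x64x3 output and a matching 9-value reference slice.
output = np.zeros((64, 64, 3), dtype=np.float32)
assert_slice_close(output, np.zeros(9, dtype=np.float32))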
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from typing import List from unittest.mock import Mock import torch from torch.utils.data import DataLoader, IterableDataset, TensorDataset from accelerate.accelerator import Accelerator from accelerate.utils.dataclasses import DistributedType class SCREAMING_SNAKE_CASE__ ( lowercase__ ): def __init__( self : Any , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Any: a_ : Tuple = data def __iter__( self : Union[str, Any] ) -> Union[str, Any]: for element in self.data: yield element def SCREAMING_SNAKE_CASE_ ( __A : Union[str, Any]=True ) -> Tuple: """simple docstring""" a_ : Optional[Any] = Accelerator(even_batches=__A ) assert accelerator.num_processes == 2, "this script expects that two GPUs are available" return accelerator def SCREAMING_SNAKE_CASE_ ( __A : Accelerator , __A : int , __A : int , __A : bool = False ) -> Any: """simple docstring""" if iterable: a_ : Optional[int] = DummyIterableDataset(torch.as_tensor(range(__A ) ) ) else: a_ : Optional[Any] = TensorDataset(torch.as_tensor(range(__A ) ) ) a_ : Dict = DataLoader(__A , batch_size=__A ) a_ : Dict = accelerator.prepare(__A ) return dl def SCREAMING_SNAKE_CASE_ ( __A : Accelerator , __A : int , __A : int , __A : List[int] , __A : List[int] , ) -> Union[str, Any]: """simple docstring""" a_ : int = create_dataloader(accelerator=__A , dataset_size=__A , batch_size=__A ) a_ : str = [len(batch[0] ) for batch in dl] if accelerator.process_index == 0: assert batch_sizes == process_0_expected_batch_sizes elif accelerator.process_index == 1: assert batch_sizes == process_1_expected_batch_sizes def SCREAMING_SNAKE_CASE_ ( ) -> Optional[Any]: """simple docstring""" a_ : List[Any] = create_accelerator() # without padding, we would expect a different number of batches verify_dataloader_batch_sizes( __A , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , ) # without padding, we would expect the same number of batches, but different sizes verify_dataloader_batch_sizes( __A , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , ) def SCREAMING_SNAKE_CASE_ ( ) -> Optional[Any]: """simple docstring""" a_ : Tuple = create_accelerator(even_batches=__A ) verify_dataloader_batch_sizes( __A , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , ) verify_dataloader_batch_sizes( __A , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , ) def SCREAMING_SNAKE_CASE_ ( ) -> str: """simple docstring""" a_ : List[str] = create_accelerator(even_batches=__A ) a_ : List[str] = torch.nn.Linear(1 , 1 ) a_ : Optional[int] = accelerator.prepare(__A ) a_ : Any = create_dataloader(__A , dataset_size=3 , batch_size=1 ) a_ : Optional[int] = [] with accelerator.join_uneven_inputs([ddp_model] ): for batch_idx, batch in enumerate(__A ): 
a_ : List[str] = ddp_model(batch[0].float() ) a_ : List[str] = output.sum() loss.backward() batch_idxs.append(__A ) accelerator.wait_for_everyone() if accelerator.process_index == 0: assert batch_idxs == [0, 1] elif accelerator.process_index == 1: assert batch_idxs == [0] def SCREAMING_SNAKE_CASE_ ( __A : int ) -> int: """simple docstring""" with warnings.catch_warnings(record=__A ) as w: with accelerator.join_uneven_inputs([Mock()] ): pass assert issubclass(w[-1].category , __A ) assert "only supported for multi-GPU" in str(w[-1].message ) def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]: """simple docstring""" a_ : List[Any] = True a_ : List[Any] = False a_ : List[str] = create_accelerator(even_batches=__A ) a_ : Optional[int] = torch.nn.Linear(1 , 1 ) a_ : str = accelerator.prepare(__A ) a_ : Optional[int] = create_dataloader(__A , dataset_size=3 , batch_size=1 ) a_ : Any = create_dataloader(__A , dataset_size=3 , batch_size=1 ) with accelerator.join_uneven_inputs([ddp_model] , even_batches=__A ): a_ : Optional[Any] = train_dl.batch_sampler.even_batches a_ : Dict = valid_dl.batch_sampler.even_batches assert train_dl_overridden_value == overridden_even_batches assert valid_dl_overridden_value == overridden_even_batches assert train_dl.batch_sampler.even_batches == default_even_batches assert valid_dl.batch_sampler.even_batches == default_even_batches def SCREAMING_SNAKE_CASE_ ( ) -> Optional[Any]: """simple docstring""" a_ : List[str] = True a_ : str = False a_ : Any = create_accelerator(even_batches=__A ) a_ : Any = torch.nn.Linear(1 , 1 ) a_ : str = accelerator.prepare(__A ) create_dataloader(__A , dataset_size=3 , batch_size=1 , iterable=__A ) a_ : Optional[int] = create_dataloader(__A , dataset_size=3 , batch_size=1 ) with warnings.catch_warnings(): warnings.filterwarnings('ignore' ) try: with accelerator.join_uneven_inputs([ddp_model] , even_batches=__A ): a_ : Union[str, Any] = batch_dl.batch_sampler.even_batches except AttributeError: # ensure attribute error is not raised when processing iterable dl raise AssertionError assert batch_dl_overridden_value == overridden_even_batches assert batch_dl.batch_sampler.even_batches == default_even_batches def SCREAMING_SNAKE_CASE_ ( ) -> List[str]: """simple docstring""" a_ : Union[str, Any] = create_accelerator() a_ : Any = torch.nn.Linear(1 , 1 ) a_ : Union[str, Any] = accelerator.prepare(__A ) create_dataloader(__A , dataset_size=3 , batch_size=1 , iterable=__A ) with warnings.catch_warnings(record=__A ) as w: with accelerator.join_uneven_inputs([ddp_model] , even_batches=__A ): pass assert issubclass(w[-1].category , __A ) assert "only supported for map-style datasets" in str(w[-1].message ) def SCREAMING_SNAKE_CASE_ ( ) -> Dict: """simple docstring""" a_ : Any = create_accelerator() accelerator.print('Test that even_batches variable ensures uniform batches across processes' ) test_default_ensures_even_batch_sizes() accelerator.print('Run tests with even_batches disabled' ) test_can_disable_even_batches() accelerator.print('Test joining uneven inputs' ) test_can_join_uneven_inputs() accelerator.print('Test overriding even_batches when joining uneven inputs' ) test_join_can_override_even_batches() accelerator.print('Test overriding even_batches for mixed dataloader types' ) test_join_can_override_for_mixed_type_dataloaders() accelerator.print('Test overriding even_batches raises a warning for iterable dataloaders' ) test_join_raises_warning_for_iterable_when_overriding_even_batches() accelerator.print('Test join with non DDP distributed 
raises warning' ) a_ : Optional[int] = accelerator.state.distributed_type a_ : Optional[int] = DistributedType.FSDP test_join_raises_warning_for_non_ddp_distributed(__A ) a_ : List[str] = original_state if __name__ == "__main__": main()
32
import inspect import re from hashlib import shaaaa from typing import Dict, List from .arrow import arrow from .audiofolder import audiofolder from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .sql import sql # noqa F401 from .text import text def SCREAMING_SNAKE_CASE_ ( __A : List[str] ) -> str: """simple docstring""" a_ : Tuple = [] for line in lines: a_ : Any = re.sub(R'#.*' , '' , __A ) # remove comments if line: filtered_lines.append(__A ) a_ : Tuple = '\n'.join(__A ) # Make a hash from all this code a_ : Tuple = full_str.encode('utf-8' ) return shaaaa(__A ).hexdigest() # get importable module names and hash for caching UpperCAmelCase_ : List[Any] = { 'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), 'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), 'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), 'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), 'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), 'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), 'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), 'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), } # Used to infer the module to use based on the data files extensions UpperCAmelCase_ : Dict = { '.csv': ('csv', {}), '.tsv': ('csv', {'sep': '\t'}), '.json': ('json', {}), '.jsonl': ('json', {}), '.parquet': ('parquet', {}), '.arrow': ('arrow', {}), '.txt': ('text', {}), } _EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) UpperCAmelCase_ : Optional[int] = {'imagefolder', 'audiofolder'} # Used to filter data files based on extensions given a module name UpperCAmelCase_ : Dict[str, List[str]] = {} for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) _MODULE_TO_EXTENSIONS["imagefolder"].append('.zip') _MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
32
1
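The module-registry entry in the row above fingerprints each builder module by stripping comments from its source and hashing what remains. A standard-library-only sketch of that idea; the helper name is ours, and SHA-256 is an assumption since the hash function's name is obfuscated in the dump.

import re
from hashlib import sha256
from typing import List

def hash_python_lines(lines: List[str]) -> str:
    """Fingerprint source lines after removing comments and dropping empty lines."""
    filtered = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered.append(line)
    return sha256("\n".join(filtered).encode("utf-8")).hexdigest()

print(hash_python_lines(["x = 1  # set x", "", "y = x + 1"]))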
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING UpperCAmelCase_ : List[str] = logging.get_logger(__name__) UpperCAmelCase_ : Optional[Any] = { 'microsoft/table-transformer-detection': ( 'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json' ), } class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : int = '''table-transformer''' snake_case__ : Union[str, Any] = ['''past_key_values'''] snake_case__ : Optional[Any] = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE__ : str=1_0_0 , SCREAMING_SNAKE_CASE__ : Optional[Any]=6 , SCREAMING_SNAKE_CASE__ : List[str]=2_0_4_8 , SCREAMING_SNAKE_CASE__ : List[str]=8 , SCREAMING_SNAKE_CASE__ : Dict=6 , SCREAMING_SNAKE_CASE__ : Tuple=2_0_4_8 , SCREAMING_SNAKE_CASE__ : List[str]=8 , SCREAMING_SNAKE_CASE__ : List[str]=0.0 , SCREAMING_SNAKE_CASE__ : List[str]=0.0 , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : Optional[Any]="relu" , SCREAMING_SNAKE_CASE__ : Optional[Any]=2_5_6 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.0 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.0 , SCREAMING_SNAKE_CASE__ : List[str]=0.02 , SCREAMING_SNAKE_CASE__ : Tuple=1.0 , SCREAMING_SNAKE_CASE__ : Any=False , SCREAMING_SNAKE_CASE__ : List[Any]="sine" , SCREAMING_SNAKE_CASE__ : Dict="resnet50" , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : Dict=1 , SCREAMING_SNAKE_CASE__ : int=5 , SCREAMING_SNAKE_CASE__ : Dict=2 , SCREAMING_SNAKE_CASE__ : Optional[int]=1 , SCREAMING_SNAKE_CASE__ : int=1 , SCREAMING_SNAKE_CASE__ : Any=5 , SCREAMING_SNAKE_CASE__ : List[str]=2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.1 , **SCREAMING_SNAKE_CASE__ : int , ) -> Tuple: if backbone_config is not None and use_timm_backbone: raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' ) if not use_timm_backbone: if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' 
) a_ : Dict = CONFIG_MAPPING['resnet'](out_features=['stage4'] ) elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): a_ : int = backbone_config.get('model_type' ) a_ : Any = CONFIG_MAPPING[backbone_model_type] a_ : Optional[int] = config_class.from_dict(SCREAMING_SNAKE_CASE__ ) # set timm attributes to None a_ , a_ , a_ : Optional[int] = None, None, None a_ : Union[str, Any] = use_timm_backbone a_ : str = backbone_config a_ : str = num_channels a_ : Tuple = num_queries a_ : int = d_model a_ : Optional[int] = encoder_ffn_dim a_ : str = encoder_layers a_ : Optional[Any] = encoder_attention_heads a_ : Any = decoder_ffn_dim a_ : int = decoder_layers a_ : Dict = decoder_attention_heads a_ : Union[str, Any] = dropout a_ : Dict = attention_dropout a_ : Dict = activation_dropout a_ : Optional[Any] = activation_function a_ : List[str] = init_std a_ : Any = init_xavier_std a_ : Union[str, Any] = encoder_layerdrop a_ : Optional[int] = decoder_layerdrop a_ : Union[str, Any] = encoder_layers a_ : Dict = auxiliary_loss a_ : str = position_embedding_type a_ : Union[str, Any] = backbone a_ : Any = use_pretrained_backbone a_ : List[str] = dilation # Hungarian matcher a_ : Tuple = class_cost a_ : Optional[Any] = bbox_cost a_ : Any = giou_cost # Loss coefficients a_ : Optional[Any] = mask_loss_coefficient a_ : Union[str, Any] = dice_loss_coefficient a_ : Dict = bbox_loss_coefficient a_ : List[Any] = giou_loss_coefficient a_ : str = eos_coefficient super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) @property def SCREAMING_SNAKE_CASE ( self : str ) -> int: return self.encoder_attention_heads @property def SCREAMING_SNAKE_CASE ( self : str ) -> int: return self.d_model class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : int = version.parse('''1.11''' ) @property def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ('pixel_mask', {0: 'batch'}), ] ) @property def SCREAMING_SNAKE_CASE ( self : Any ) -> float: return 1E-5 @property def SCREAMING_SNAKE_CASE ( self : int ) -> int: return 1_2
32
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase_ : str = logging.get_logger(__name__) UpperCAmelCase_ : List[str] = { 'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json', 'YituTech/conv-bert-medium-small': ( 'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json' ), 'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json', # See all ConvBERT models at https://huggingface.co/models?filter=convbert } class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Optional[int] = '''convbert''' def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int]=3_0_5_2_2 , SCREAMING_SNAKE_CASE__ : Dict=7_6_8 , SCREAMING_SNAKE_CASE__ : Optional[int]=1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE__ : str=3_0_7_2 , SCREAMING_SNAKE_CASE__ : Dict="gelu" , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=5_1_2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=2 , SCREAMING_SNAKE_CASE__ : List[Any]=0.02 , SCREAMING_SNAKE_CASE__ : Any=1E-12 , SCREAMING_SNAKE_CASE__ : int=1 , SCREAMING_SNAKE_CASE__ : int=0 , SCREAMING_SNAKE_CASE__ : Optional[int]=2 , SCREAMING_SNAKE_CASE__ : Optional[int]=7_6_8 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : List[Any]=9 , SCREAMING_SNAKE_CASE__ : List[Any]=1 , SCREAMING_SNAKE_CASE__ : Dict=None , **SCREAMING_SNAKE_CASE__ : List[Any] , ) -> Any: super().__init__( pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) a_ : Tuple = vocab_size a_ : List[str] = hidden_size a_ : List[str] = num_hidden_layers a_ : Dict = num_attention_heads a_ : Optional[int] = intermediate_size a_ : int = hidden_act a_ : Dict = hidden_dropout_prob a_ : int = attention_probs_dropout_prob a_ : str = max_position_embeddings a_ : List[str] = type_vocab_size a_ : List[str] = initializer_range a_ : Tuple = layer_norm_eps a_ : Optional[int] = embedding_size a_ : List[Any] = head_ratio a_ : List[Any] = conv_kernel_size a_ : Tuple = num_groups a_ : Tuple = classifier_dropout class SCREAMING_SNAKE_CASE__ ( lowercase__ ): @property def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": a_ : Tuple = {0: 'batch', 1: 'choice', 2: 'sequence'} else: a_ : List[str] = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ] )
32
1
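Both config entries in the row above are flat containers that copy constructor arguments onto attributes, with an attribute_map exposing user-facing names such as hidden_size on top of model-specific internals. A toy sketch of that aliasing idea, independent of transformers; the class, field names, and the __getattr__ mechanism are our illustration, not the library's actual implementation.

class ToyConfig:
    # User-facing names mapped onto internal attribute names, in the spirit of
    # the attribute_map seen in the config entries above.
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads"}

    def __init__(self, d_model=256, encoder_attention_heads=8, encoder_layers=6):
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.encoder_layers = encoder_layers

    def __getattr__(self, name):
        # Called only when normal attribute lookup fails, so aliases resolve here.
        alias = type(self).attribute_map.get(name)
        if alias is not None:
            return getattr(self, alias)
        raise AttributeError(name)

config = ToyConfig(d_model=512)
print(config.hidden_size)          # 512, resolved through the alias
print(config.num_attention_heads)  # 8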
import baseaa import io import json import os from copy import deepcopy from ..optimizer import AcceleratedOptimizer from ..scheduler import AcceleratedScheduler class SCREAMING_SNAKE_CASE__ : def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> str: if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): # Don't modify user's data should they want to reuse it (e.g. in tests), because once we # modified it, it will not be accepted here again, since `auto` values would have been overridden a_ : Optional[Any] = deepcopy(SCREAMING_SNAKE_CASE__ ) elif os.path.exists(SCREAMING_SNAKE_CASE__ ): with io.open(SCREAMING_SNAKE_CASE__ , 'r' , encoding='utf-8' ) as f: a_ : Any = json.load(SCREAMING_SNAKE_CASE__ ) else: try: a_ : int = baseaa.urlsafe_baadecode(SCREAMING_SNAKE_CASE__ ).decode('utf-8' ) a_ : str = json.loads(SCREAMING_SNAKE_CASE__ ) except (UnicodeDecodeError, AttributeError, ValueError): raise ValueError( F"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" ) a_ : Any = config self.set_stage_and_offload() def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str: # zero stage - this is done as early as possible, before model is created, to allow # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object # during ``zero.Init()`` which needs to know the dtype, and some other hparams. a_ : Tuple = self.get_value('zero_optimization.stage' , -1 ) # offload a_ : Union[str, Any] = False if self.is_zeroa() or self.is_zeroa(): a_ : List[Any] = set(['cpu', 'nvme'] ) a_ : List[Any] = set( [ self.get_value('zero_optimization.offload_optimizer.device' ), self.get_value('zero_optimization.offload_param.device' ), ] ) if len(offload_devices & offload_devices_valid ) > 0: a_ : Optional[Any] = True def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : Tuple ) -> List[str]: a_ : Union[str, Any] = self.config # find the config node of interest if it exists a_ : Dict = ds_key_long.split('.' ) a_ : str = nodes.pop() for node in nodes: a_ : Optional[int] = config.get(SCREAMING_SNAKE_CASE__ ) if config is None: return None, ds_key return config, ds_key def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any]=None ) -> Dict: a_ , a_ : Dict = self.find_config_node(SCREAMING_SNAKE_CASE__ ) if config is None: return default return config.get(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int=False ) -> Optional[int]: a_ : Optional[Any] = self.config # find the config node of interest if it exists a_ : Tuple = ds_key_long.split('.' 
) for node in nodes: a_ : Any = config a_ : Optional[Any] = config.get(SCREAMING_SNAKE_CASE__ ) if config is None: if must_exist: raise ValueError(F"""Can't find {ds_key_long} entry in the config: {self.config}""" ) else: return # if found remove it if parent_config is not None: parent_config.pop(SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> str: a_ : Union[str, Any] = self.get_value(SCREAMING_SNAKE_CASE__ ) return False if value is None else bool(SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : str , SCREAMING_SNAKE_CASE__ : Tuple ) -> Optional[Any]: a_ : List[Any] = self.get_value(SCREAMING_SNAKE_CASE__ ) return False if value is None else not bool(SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: return self._stage == 2 def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict: return self._stage == 3 def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: return self._offload class SCREAMING_SNAKE_CASE__ : def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any ) -> Union[str, Any]: a_ : Optional[int] = engine def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict: # runs backpropagation and handles mixed precision self.engine.backward(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) # Deepspeed's `engine.step` performs the following operations: # - gradient accumulation check # - gradient clipping # - optimizer step # - zero grad # - checking overflow # - lr_scheduler step (only if engine.lr_scheduler is not None) self.engine.step() # and this plugin overrides the above calls with no-ops when Accelerate runs under # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple # training loop that works transparently under many training regimes. class SCREAMING_SNAKE_CASE__ ( lowercase__ ): def __init__( self : Any , SCREAMING_SNAKE_CASE__ : str ) -> Dict: super().__init__(SCREAMING_SNAKE_CASE__ , device_placement=SCREAMING_SNAKE_CASE__ , scaler=SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = hasattr(self.optimizer , 'overflow' ) def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : Dict=None ) -> List[Any]: pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]: pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed @property def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any: if self.__has_overflow__: return self.optimizer.overflow return False class SCREAMING_SNAKE_CASE__ ( lowercase__ ): def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> List[str]: super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: pass # `accelerator.backward(loss)` is doing that automatically. 
Therefore, its implementation is not needed class SCREAMING_SNAKE_CASE__ : def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple=0.001 , SCREAMING_SNAKE_CASE__ : List[Any]=0 , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[Any]: a_ : Tuple = params a_ : Optional[int] = lr a_ : List[str] = weight_decay a_ : Optional[Any] = kwargs class SCREAMING_SNAKE_CASE__ : def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : Dict=0 , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Dict: a_ : Optional[int] = optimizer a_ : int = total_num_steps a_ : Optional[Any] = warmup_num_steps a_ : int = kwargs
32
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE__ : def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str=1_3 , SCREAMING_SNAKE_CASE__ : Optional[int]=7 , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : str=9_9 , SCREAMING_SNAKE_CASE__ : str=2_4 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=6 , SCREAMING_SNAKE_CASE__ : Optional[int]=3_7 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=5_1_2 , SCREAMING_SNAKE_CASE__ : List[str]=1_6 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : int=0.02 , SCREAMING_SNAKE_CASE__ : Optional[Any]=3 , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Tuple=1_0_0_0 , ) -> str: a_ : Optional[Any] = parent a_ : List[str] = batch_size a_ : List[str] = seq_length a_ : str = is_training a_ : str = use_input_mask a_ : int = use_token_type_ids a_ : List[str] = use_labels a_ : Optional[int] = vocab_size a_ : Any = hidden_size a_ : int = num_hidden_layers a_ : List[str] = num_attention_heads a_ : str = intermediate_size a_ : Union[str, Any] = hidden_act a_ : List[str] = hidden_dropout_prob a_ : int = attention_probs_dropout_prob a_ : int = max_position_embeddings a_ : Tuple = type_vocab_size a_ : Optional[Any] = type_sequence_label_size a_ : Tuple = initializer_range a_ : Dict = num_labels a_ : str = scope a_ : Optional[int] = range_bbox def SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: a_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a_ : Any = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: a_ : int = bbox[i, j, 3] a_ : str = bbox[i, j, 1] a_ : List[str] = t if bbox[i, j, 2] < bbox[i, j, 0]: a_ : Tuple = bbox[i, j, 2] a_ : List[str] = bbox[i, j, 0] a_ : Union[str, Any] = t a_ : List[Any] = None if self.use_input_mask: a_ : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) a_ : List[Any] = None if self.use_token_type_ids: a_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a_ : int = None a_ : Tuple = None if self.use_labels: a_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a_ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a_ : Optional[int] = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def SCREAMING_SNAKE_CASE ( self : Dict ) -> int: return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> str: a_ : Any = LiltModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : Any = model(SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = model(SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = model(SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> int: a_ : Any = self.num_labels a_ : str = LiltForTokenClassification(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : str = model( SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> str: a_ : Union[str, Any] = LiltForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : List[str] = model( SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , start_positions=SCREAMING_SNAKE_CASE__ , end_positions=SCREAMING_SNAKE_CASE__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: a_ : int = self.prepare_config_and_inputs() ( ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ) : List[Any] = config_and_inputs a_ : Optional[int] = { 'input_ids': input_ids, 'bbox': bbox, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ): snake_case__ : Union[str, Any] = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) snake_case__ : str = ( 
{ '''feature-extraction''': LiltModel, '''question-answering''': LiltForQuestionAnswering, '''text-classification''': LiltForSequenceClassification, '''token-classification''': LiltForTokenClassification, '''zero-shot''': LiltForSequenceClassification, } if is_torch_available() else {} ) snake_case__ : List[str] = False snake_case__ : str = False def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int ) -> int: return True def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: a_ : str = LiltModelTester(self ) a_ : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=3_7 ) def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]: self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: a_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: a_ : Tuple = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: a_ : List[str] = type self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: a_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]: a_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE__ ) @slow def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a_ : List[Any] = LiltModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) @require_torch @slow class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]: a_ : List[str] = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(SCREAMING_SNAKE_CASE__ ) a_ : str = torch.tensor([[1, 2]] , device=SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=SCREAMING_SNAKE_CASE__ ) # forward pass with torch.no_grad(): a_ : str = model(input_ids=SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = torch.Size([1, 2, 7_6_8] ) a_ : int = torch.tensor( [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=SCREAMING_SNAKE_CASE__ , ) self.assertTrue(outputs.last_hidden_state.shape , SCREAMING_SNAKE_CASE__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) )
import json import os import unittest from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import ( VOCAB_FILES_NAMES, GPTSanJapaneseTokenizer, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class SCREAMING_SNAKE_CASE__ ( lowercase__ , unittest.TestCase ): snake_case__ : Any = GPTSanJapaneseTokenizer snake_case__ : Tuple = False snake_case__ : str = {'''do_clean_text''': False, '''add_prefix_space''': False} def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str: super().setUp() # fmt: off a_ : Union[str, Any] = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>'] # fmt: on a_ : int = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀 a_ : List[Any] = {'unk_token': '<unk>'} a_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) a_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) with open(self.emoji_file , 'w' ) as emoji_writer: emoji_writer.write(json.dumps(SCREAMING_SNAKE_CASE__ ) ) def SCREAMING_SNAKE_CASE ( self : List[str] , **SCREAMING_SNAKE_CASE__ : Tuple ) -> int: kwargs.update(self.special_tokens_map ) return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> int: a_ : Optional[int] = 'こんにちは、世界。 \nこんばんは、㔺界。😀' a_ : List[str] = 'こんにちは、世界。 \nこんばんは、世界。😀' return input_text, output_text def SCREAMING_SNAKE_CASE ( self : str , SCREAMING_SNAKE_CASE__ : int ) -> Dict: a_ , a_ : Union[str, Any] = self.get_input_output_texts(SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) a_ : Dict = tokenizer.decode(SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ ) return text, ids def SCREAMING_SNAKE_CASE ( self : Dict ) -> int: pass # TODO add if relevant def SCREAMING_SNAKE_CASE ( self : Dict ) -> Any: pass # TODO add if relevant def SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple: pass # TODO add if relevant def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]: a_ : List[str] = self.get_tokenizer() # Testing tokenization a_ : List[Any] = 'こんにちは、世界。 こんばんは、㔺界。' a_ : Optional[int] = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。'] a_ : Dict = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Testing conversion to ids without special tokens a_ : Tuple = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] a_ : List[Any] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Testing conversion to ids with special tokens a_ : int = tokens + [tokenizer.unk_token] a_ : int = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 1_9] a_ : Tuple = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict: a_ : Union[str, Any] = self.get_tokenizer() # Testing tokenization a_ : Dict = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。' a_ : List[Any] = 
'こんにちは、、、、世界。こんばんは、、、、世界。' a_ : Any = tokenizer.encode(SCREAMING_SNAKE_CASE__ ) a_ : Optional[Any] = tokenizer.decode(SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @slow def SCREAMING_SNAKE_CASE ( self : str ) -> Dict: a_ : Tuple = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) # Testing tokenization a_ : List[Any] = 'こんにちは、世界。' a_ : int = 'こんばんは、㔺界。😀' a_ : Dict = 'こんにちは、世界。こんばんは、世界。😀' a_ : Optional[int] = tokenizer.encode(prefix_text + input_text ) a_ : Any = tokenizer.encode('' , prefix_text=prefix_text + input_text ) a_ : Union[str, Any] = tokenizer.encode(SCREAMING_SNAKE_CASE__ , prefix_text=SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = tokenizer.decode(SCREAMING_SNAKE_CASE__ ) a_ : Tuple = tokenizer.decode(SCREAMING_SNAKE_CASE__ ) a_ : str = tokenizer.decode(SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @slow def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: a_ : Tuple = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) # Testing tokenization a_ : str = 'こんにちは、世界。' a_ : List[str] = 'こんばんは、㔺界。😀' a_ : str = len(tokenizer.encode(SCREAMING_SNAKE_CASE__ ) ) - 2 a_ : Tuple = len(tokenizer.encode(SCREAMING_SNAKE_CASE__ ) ) - 2 a_ : Optional[Any] = [1] + [0] * (len_prefix + len_text + 1) a_ : Optional[Any] = [1] * (len_prefix + len_text + 1) + [0] a_ : Tuple = [1] + [1] * (len_prefix) + [0] * (len_text + 1) a_ : List[str] = tokenizer(prefix_text + input_text ).token_type_ids a_ : Union[str, Any] = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids a_ : Any = tokenizer(SCREAMING_SNAKE_CASE__ , prefix_text=SCREAMING_SNAKE_CASE__ ).token_type_ids self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @slow def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: a_ : str = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) a_ : Optional[int] = tokenizer.encode('あンいワ' ) a_ : Dict = tokenizer.encode('' , prefix_text='あンいワ' ) a_ : Dict = tokenizer.encode('いワ' , prefix_text='あン' ) self.assertEqual(tokenizer.decode(SCREAMING_SNAKE_CASE__ ) , tokenizer.decode(SCREAMING_SNAKE_CASE__ ) ) self.assertEqual(tokenizer.decode(SCREAMING_SNAKE_CASE__ ) , tokenizer.decode(SCREAMING_SNAKE_CASE__ ) ) self.assertNotEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertNotEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token @slow def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: a_ : List[str] = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) a_ : Optional[Any] = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']] a_ : List[str] = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ ) a_ : Dict = tokenizer.batch_encode_plus(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ ) # fmt: off a_ : List[Any] = [[3_5_9_9_3, 8_6_4_0, 2_5_9_4_8, 3_5_9_9_8, 3_0_6_4_7, 3_5_6_7_5, 3_5_9_9_9, 3_5_9_9_9], [3_5_9_9_3, 1_0_3_8_2, 9_8_6_8, 3_5_9_9_8, 3_0_6_4_6, 9_4_5_9, 3_0_6_4_6, 3_5_6_7_5]] a_ : Any = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]] a_ : List[Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 
1]] # fmt: on self.assertListEqual(x_token.input_ids , SCREAMING_SNAKE_CASE__ ) self.assertListEqual(x_token.token_type_ids , SCREAMING_SNAKE_CASE__ ) self.assertListEqual(x_token.attention_mask , SCREAMING_SNAKE_CASE__ ) self.assertListEqual(x_token_a.input_ids , SCREAMING_SNAKE_CASE__ ) self.assertListEqual(x_token_a.token_type_ids , SCREAMING_SNAKE_CASE__ ) self.assertListEqual(x_token_a.attention_mask , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: # Intentionally convert some words to accommodate character fluctuations unique to Japanese pass def SCREAMING_SNAKE_CASE ( self : Dict ) -> int: # tokenizer has no padding token pass
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class SCREAMING_SNAKE_CASE__ : def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple=1_3 , SCREAMING_SNAKE_CASE__ : str=7 , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=9_9 , SCREAMING_SNAKE_CASE__ : Optional[Any]=3_2 , SCREAMING_SNAKE_CASE__ : List[str]=5 , SCREAMING_SNAKE_CASE__ : List[Any]=4 , SCREAMING_SNAKE_CASE__ : Tuple=3_7 , SCREAMING_SNAKE_CASE__ : Any="gelu" , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : int=5_1_2 , SCREAMING_SNAKE_CASE__ : int=1_6 , SCREAMING_SNAKE_CASE__ : Optional[int]=2 , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE__ : Optional[Any]=4 , SCREAMING_SNAKE_CASE__ : Optional[int]=None , ) -> Any: a_ : Tuple = parent a_ : int = batch_size a_ : Tuple = seq_length a_ : List[Any] = is_training a_ : List[str] = use_token_type_ids a_ : Dict = use_labels a_ : Any = vocab_size a_ : List[str] = hidden_size a_ : Tuple = num_hidden_layers a_ : List[Any] = num_attention_heads a_ : Dict = intermediate_size a_ : Any = hidden_act a_ : List[str] = hidden_dropout_prob a_ : Tuple = attention_probs_dropout_prob a_ : Optional[Any] = max_position_embeddings a_ : List[Any] = type_vocab_size a_ : int = type_sequence_label_size a_ : List[Any] = initializer_range a_ : List[str] = num_labels a_ : Union[str, Any] = num_choices a_ : str = scope a_ : Tuple = self.vocab_size - 1 def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: a_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a_ : Any = None if self.use_token_type_ids: a_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a_ : List[Any] = None a_ : Union[str, Any] = None a_ : List[Any] = None if self.use_labels: a_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a_ : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) a_ : Union[str, Any] = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) a_ : List[str] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , *SCREAMING_SNAKE_CASE__ : Tuple ) -> Union[str, Any]: a_ : Dict = OpenAIGPTModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : str = 
model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , head_mask=SCREAMING_SNAKE_CASE__ ) a_ : Dict = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ ) a_ : Dict = model(SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Any: a_ : str = OpenAIGPTLMHeadModel(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : Optional[int] = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict: a_ : int = OpenAIGPTDoubleHeadsModel(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : str = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : str ) -> List[str]: a_ : Any = self.num_labels a_ : Dict = OpenAIGPTForSequenceClassification(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a_ : Any = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: a_ : Optional[Any] = self.prepare_config_and_inputs() ( ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ) : Optional[Any] = config_and_inputs a_ : Optional[int] = { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask, } return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ): snake_case__ : Tuple = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) snake_case__ : List[str] = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly snake_case__ : Dict = ( { '''feature-extraction''': OpenAIGPTModel, '''text-classification''': OpenAIGPTForSequenceClassification, '''text-generation''': OpenAIGPTLMHeadModel, '''zero-shot''': OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] ) -> Dict: if 
pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any=False ) -> List[str]: a_ : str = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": a_ : Optional[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ , ) a_ : str = inputs_dict['labels'] a_ : Optional[int] = inputs_dict['labels'] a_ : Optional[int] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ , ) a_ : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ ) return inputs_dict def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: a_ : str = OpenAIGPTModelTester(self ) a_ : int = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , n_embd=3_7 ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: a_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: a_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]: a_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: a_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*SCREAMING_SNAKE_CASE__ ) @slow def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str: for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a_ : str = OpenAIGPTModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE ( self : Dict ) -> int: a_ : Dict = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' ) model.to(SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ ) # the president is a_ : Tuple = [ 4_8_1, 4_7_3_5, 5_4_4, 2_4_6, 9_6_3, 8_7_0, 7_6_2, 2_3_9, 2_4_4, 4_0_4_7_7, 2_4_4, 2_4_9, 7_1_9, 8_8_1, 4_8_7, 5_4_4, 2_4_0, 2_4_4, 6_0_3, 4_8_1, ] # the president is a very good man. " \n " i\'m sure he is, " said the a_ : Dict = model.generate(SCREAMING_SNAKE_CASE__ , do_sample=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(output_ids[0].tolist() , SCREAMING_SNAKE_CASE__ )
import gc import math import unittest import torch from diffusers import UNetaDModel from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin UpperCAmelCase_ : str = logging.get_logger(__name__) enable_full_determinism() class SCREAMING_SNAKE_CASE__ ( lowercase__ , lowercase__ , unittest.TestCase ): snake_case__ : Tuple = UNetaDModel snake_case__ : int = '''sample''' @property def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int: a_ : Optional[Any] = 4 a_ : Tuple = 3 a_ : Dict = (3_2, 3_2) a_ : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE__ ) a_ : str = torch.tensor([1_0] ).to(SCREAMING_SNAKE_CASE__ ) return {"sample": noise, "timestep": time_step} @property def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]: return (3, 3_2, 3_2) @property def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: return (3, 3_2, 3_2) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: a_ : List[str] = { 'block_out_channels': (3_2, 6_4), 'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'), 'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'), 'attention_head_dim': 3, 'out_channels': 3, 'in_channels': 3, 'layers_per_block': 2, 'sample_size': 3_2, } a_ : List[Any] = self.dummy_input return init_dict, inputs_dict class SCREAMING_SNAKE_CASE__ ( lowercase__ , lowercase__ , unittest.TestCase ): snake_case__ : Tuple = UNetaDModel snake_case__ : Any = '''sample''' @property def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: a_ : Tuple = 4 a_ : Any = 4 a_ : Dict = (3_2, 3_2) a_ : str = floats_tensor((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE__ ) a_ : List[str] = torch.tensor([1_0] ).to(SCREAMING_SNAKE_CASE__ ) return {"sample": noise, "timestep": time_step} @property def SCREAMING_SNAKE_CASE ( self : str ) -> str: return (4, 3_2, 3_2) @property def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: return (4, 3_2, 3_2) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]: a_ : Any = { 'sample_size': 3_2, 'in_channels': 4, 'out_channels': 4, 'layers_per_block': 2, 'block_out_channels': (3_2, 6_4), 'attention_head_dim': 3_2, 'down_block_types': ('DownBlock2D', 'DownBlock2D'), 'up_block_types': ('UpBlock2D', 'UpBlock2D'), } a_ : int = self.dummy_input return init_dict, inputs_dict def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]: a_ , a_ : Dict = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) self.assertEqual(len(loading_info['missing_keys'] ) , 0 ) model.to(SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' ) def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str: a_ , a_ : List[Any] = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) a_ : str = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' ) def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: # by defautl model loading will use accelerate as `low_cpu_mem_usage=True` a_ , a_ : 
Optional[int] = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=SCREAMING_SNAKE_CASE__ ) model_accelerate.to(SCREAMING_SNAKE_CASE__ ) model_accelerate.eval() a_ : Optional[Any] = torch.randn( 1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , ) a_ : Union[str, Any] = noise.to(SCREAMING_SNAKE_CASE__ ) a_ : Any = torch.tensor([1_0] * noise.shape[0] ).to(SCREAMING_SNAKE_CASE__ ) a_ : Dict = model_accelerate(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )['sample'] # two models don't need to stay in the device at the same time del model_accelerate torch.cuda.empty_cache() gc.collect() a_ , a_ : Optional[Any] = UNetaDModel.from_pretrained( 'fusing/unet-ldm-dummy-update' , output_loading_info=SCREAMING_SNAKE_CASE__ , low_cpu_mem_usage=SCREAMING_SNAKE_CASE__ ) model_normal_load.to(SCREAMING_SNAKE_CASE__ ) model_normal_load.eval() a_ : List[Any] = model_normal_load(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )['sample'] assert torch_all_close(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , rtol=1E-3 ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]: a_ : Dict = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' ) model.eval() model.to(SCREAMING_SNAKE_CASE__ ) a_ : Optional[Any] = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) a_ : Dict = noise.to(SCREAMING_SNAKE_CASE__ ) a_ : Any = torch.tensor([1_0] * noise.shape[0] ).to(SCREAMING_SNAKE_CASE__ ) with torch.no_grad(): a_ : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).sample a_ : Optional[int] = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off a_ : List[Any] = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] ) # fmt: on self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , rtol=1E-3 ) ) class SCREAMING_SNAKE_CASE__ ( lowercase__ , lowercase__ , unittest.TestCase ): snake_case__ : List[Any] = UNetaDModel snake_case__ : str = '''sample''' @property def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[str]=(3_2, 3_2) ) -> List[str]: a_ : Any = 4 a_ : Any = 3 a_ : int = floats_tensor((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE__ ) a_ : Tuple = torch.tensor(batch_size * [1_0] ).to(dtype=torch.intaa , device=SCREAMING_SNAKE_CASE__ ) return {"sample": noise, "timestep": time_step} @property def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]: return (3, 3_2, 3_2) @property def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: return (3, 3_2, 3_2) def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict: a_ : Optional[Any] = { 'block_out_channels': [3_2, 6_4, 6_4, 6_4], 'in_channels': 3, 'layers_per_block': 1, 'out_channels': 3, 'time_embedding_type': 'fourier', 'norm_eps': 1E-6, 'mid_block_scale_factor': math.sqrt(2.0 ), 'norm_num_groups': None, 'down_block_types': [ 'SkipDownBlock2D', 'AttnSkipDownBlock2D', 'SkipDownBlock2D', 'SkipDownBlock2D', ], 'up_block_types': [ 'SkipUpBlock2D', 'SkipUpBlock2D', 'AttnSkipUpBlock2D', 'SkipUpBlock2D', ], } a_ : List[str] = self.dummy_input return init_dict, inputs_dict @slow def SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]: a_ , a_ : List[Any] = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' , output_loading_info=SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) 
self.assertEqual(len(loading_info['missing_keys'] ) , 0 ) model.to(SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = self.dummy_input a_ : Dict = floats_tensor((4, 3) + (2_5_6, 2_5_6) ).to(SCREAMING_SNAKE_CASE__ ) a_ : Optional[Any] = noise a_ : List[str] = model(**SCREAMING_SNAKE_CASE__ ) assert image is not None, "Make sure output is not None" @slow def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str: a_ : List[str] = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' ) model.to(SCREAMING_SNAKE_CASE__ ) a_ : int = 4 a_ : Union[str, Any] = 3 a_ : List[str] = (2_5_6, 2_5_6) a_ : Union[str, Any] = torch.ones((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = torch.tensor(batch_size * [1E-4] ).to(SCREAMING_SNAKE_CASE__ ) with torch.no_grad(): a_ : Dict = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).sample a_ : Union[str, Any] = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off a_ : str = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608] ) # fmt: on self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , rtol=1E-2 ) ) def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: a_ : str = UNetaDModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update' ) model.to(SCREAMING_SNAKE_CASE__ ) a_ : List[str] = 4 a_ : List[str] = 3 a_ : Union[str, Any] = (3_2, 3_2) a_ : Dict = torch.ones((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE__ ) a_ : List[str] = torch.tensor(batch_size * [1E-4] ).to(SCREAMING_SNAKE_CASE__ ) with torch.no_grad(): a_ : str = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).sample a_ : int = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off a_ : str = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] ) # fmt: on self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , rtol=1E-2 ) ) def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: # not required for this model pass
import copy from typing import Dict, List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING UpperCAmelCase_ : Optional[int] = { 'facebook/mask2former-swin-small-coco-instance': ( 'https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json' ) # See all Mask2Former models at https://huggingface.co/models?filter=mask2former } UpperCAmelCase_ : List[str] = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Any = '''mask2former''' snake_case__ : Any = ['''swin'''] snake_case__ : str = {'''hidden_size''': '''hidden_dim'''} def __init__( self : Any , SCREAMING_SNAKE_CASE__ : Optional[Dict] = None , SCREAMING_SNAKE_CASE__ : int = 2_5_6 , SCREAMING_SNAKE_CASE__ : int = 2_5_6 , SCREAMING_SNAKE_CASE__ : int = 2_5_6 , SCREAMING_SNAKE_CASE__ : int = 1_0_2_4 , SCREAMING_SNAKE_CASE__ : str = "relu" , SCREAMING_SNAKE_CASE__ : int = 6 , SCREAMING_SNAKE_CASE__ : int = 1_0 , SCREAMING_SNAKE_CASE__ : int = 8 , SCREAMING_SNAKE_CASE__ : float = 0.0 , SCREAMING_SNAKE_CASE__ : int = 2_0_4_8 , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : int = 4 , SCREAMING_SNAKE_CASE__ : int = 2_5_5 , SCREAMING_SNAKE_CASE__ : int = 1_0_0 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 2.0 , SCREAMING_SNAKE_CASE__ : float = 5.0 , SCREAMING_SNAKE_CASE__ : float = 5.0 , SCREAMING_SNAKE_CASE__ : int = 1_2_5_4_4 , SCREAMING_SNAKE_CASE__ : float = 3.0 , SCREAMING_SNAKE_CASE__ : float = 0.75 , SCREAMING_SNAKE_CASE__ : float = 0.02 , SCREAMING_SNAKE_CASE__ : float = 1.0 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : List[int] = [4, 8, 1_6, 3_2] , SCREAMING_SNAKE_CASE__ : bool = None , **SCREAMING_SNAKE_CASE__ : int , ) -> List[Any]: if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' ) a_ : Dict = CONFIG_MAPPING['swin']( image_size=2_2_4 , in_channels=3 , patch_size=4 , embed_dim=9_6 , depths=[2, 2, 1_8, 2] , num_heads=[3, 6, 1_2, 2_4] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=SCREAMING_SNAKE_CASE__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): a_ : Any = backbone_config.pop('model_type' ) a_ : Optional[Any] = CONFIG_MAPPING[backbone_model_type] a_ : List[str] = config_class.from_dict(SCREAMING_SNAKE_CASE__ ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( F"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. 
""" F"""Supported model types: {",".join(self.backbones_supported )}""" ) a_ : Dict = backbone_config a_ : List[str] = feature_size a_ : List[str] = mask_feature_size a_ : int = hidden_dim a_ : Dict = encoder_feedforward_dim a_ : str = activation_function a_ : List[str] = encoder_layers a_ : List[str] = decoder_layers a_ : Dict = num_attention_heads a_ : str = dropout a_ : Tuple = dim_feedforward a_ : List[str] = pre_norm a_ : Optional[int] = enforce_input_projection a_ : Any = common_stride a_ : Optional[int] = ignore_value a_ : int = num_queries a_ : Tuple = no_object_weight a_ : Dict = class_weight a_ : Optional[int] = mask_weight a_ : Optional[int] = dice_weight a_ : str = train_num_points a_ : List[str] = oversample_ratio a_ : List[Any] = importance_sample_ratio a_ : Any = init_std a_ : Union[str, Any] = init_xavier_std a_ : Union[str, Any] = use_auxiliary_loss a_ : Dict = feature_strides a_ : List[str] = output_auxiliary_logits a_ : Dict = decoder_layers super().__init__(**SCREAMING_SNAKE_CASE__ ) @classmethod def SCREAMING_SNAKE_CASE ( cls : str , SCREAMING_SNAKE_CASE__ : PretrainedConfig , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> List[Any]: return cls( backbone_config=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict[str, any]: a_ : Optional[int] = copy.deepcopy(self.__dict__ ) a_ : List[Any] = self.backbone_config.to_dict() a_ : Optional[Any] = self.__class__.model_type return output
def gnome_sort(lst: list) -> list:
    """Sort ``lst`` in place with gnome sort and return it."""
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1

    return lst


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(gnome_sort(unsorted))
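# A quick illustrative check of gnome_sort (not part of the original file): the walker steps
# back after every swap, so [3, 1, 2] becomes [1, 3, 2] and then [1, 2, 3].
assert gnome_sort([3, 1, 2]) == [1, 2, 3]
assert gnome_sort([]) == []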
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ : Dict = logging.get_logger(__name__) UpperCAmelCase_ : Union[str, Any] = { 'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json', } class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : List[str] = '''switch_transformers''' snake_case__ : Optional[int] = ['''past_key_values'''] snake_case__ : Optional[Any] = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[int]=3_2_1_2_8 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=7_6_8 , SCREAMING_SNAKE_CASE__ : Optional[Any]=6_4 , SCREAMING_SNAKE_CASE__ : List[str]=2_0_4_8 , SCREAMING_SNAKE_CASE__ : Dict=6_4 , SCREAMING_SNAKE_CASE__ : List[Any]=1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE__ : str=3 , SCREAMING_SNAKE_CASE__ : Tuple=1_2 , SCREAMING_SNAKE_CASE__ : Tuple=8 , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.01 , SCREAMING_SNAKE_CASE__ : str="float32" , SCREAMING_SNAKE_CASE__ : str=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3_2 , SCREAMING_SNAKE_CASE__ : Dict=1_2_8 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Dict=1E-6 , SCREAMING_SNAKE_CASE__ : Dict=0.001 , SCREAMING_SNAKE_CASE__ : Any=0.001 , SCREAMING_SNAKE_CASE__ : Optional[int]=1.0 , SCREAMING_SNAKE_CASE__ : Any="relu" , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : Optional[int]=1 , **SCREAMING_SNAKE_CASE__ : Dict , ) -> Optional[Any]: a_ : Optional[int] = vocab_size a_ : List[str] = d_model a_ : Tuple = d_kv a_ : Optional[Any] = d_ff a_ : List[Any] = num_sparse_encoder_layers a_ : Any = num_layers a_ : str = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry a_ : List[Any] = num_sparse_decoder_layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_encoder_layers > 0: a_ : Optional[int] = self.num_layers // self.num_sparse_encoder_layers else: a_ : List[Any] = self.num_layers # HACK: this will create 0 sparse layers # This tells us, each how many encoder layer we'll have to set a sparse layer. 
if self.num_sparse_decoder_layers > 0: a_ : Union[str, Any] = self.num_decoder_layers // self.num_sparse_decoder_layers else: a_ : List[str] = self.num_decoder_layers # HACK: this will create 0 sparse layers a_ : Dict = num_heads a_ : str = num_experts a_ : Any = expert_capacity a_ : List[Any] = router_bias a_ : str = router_jitter_noise if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(F"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" ) a_ : Optional[int] = router_dtype a_ : int = router_ignore_padding_tokens a_ : Any = relative_attention_num_buckets a_ : List[str] = relative_attention_max_distance a_ : Optional[Any] = dropout_rate a_ : Tuple = layer_norm_epsilon a_ : Dict = initializer_factor a_ : Any = feed_forward_proj a_ : Tuple = use_cache a_ : str = add_router_probs a_ : Optional[int] = router_z_loss_coef a_ : List[str] = router_aux_loss_coef a_ : int = self.feed_forward_proj.split('-' ) a_ : int = act_info[-1] a_ : Optional[int] = act_info[0] == 'gated' if len(SCREAMING_SNAKE_CASE__ ) > 1 and act_info[0] != "gated" or len(SCREAMING_SNAKE_CASE__ ) > 2: raise ValueError( F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ' '\'gated-gelu\' or \'relu\'' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": a_ : Any = 'gelu_new' super().__init__( pad_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
from __future__ import annotations import math def SCREAMING_SNAKE_CASE_ ( __A : int ) -> bool: """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(__A ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True UpperCAmelCase_ : Dict = [num for num in range(3, 10_0001, 2) if not is_prime(num)] def SCREAMING_SNAKE_CASE_ ( __A : int ) -> list[int]: """simple docstring""" if not isinstance(__A , __A ): raise ValueError('n must be an integer' ) if n <= 0: raise ValueError('n must be >= 0' ) a_ : Any = [] for num in range(len(__A ) ): a_ : str = 0 while 2 * i * i <= odd_composites[num]: a_ : Any = odd_composites[num] - 2 * i * i if is_prime(__A ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(__A ) == n: return list_nums return [] def SCREAMING_SNAKE_CASE_ ( ) -> int: """simple docstring""" return compute_nums(1 )[0] if __name__ == "__main__": print(F'{solution() = }')
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool UpperCAmelCase_ : Tuple = { 'Acehnese Arabic': 'ace_Arab', 'Acehnese Latin': 'ace_Latn', 'Mesopotamian Arabic': 'acm_Arab', 'Ta\'izzi-Adeni Arabic': 'acq_Arab', 'Tunisian Arabic': 'aeb_Arab', 'Afrikaans': 'afr_Latn', 'South Levantine Arabic': 'ajp_Arab', 'Akan': 'aka_Latn', 'Amharic': 'amh_Ethi', 'North Levantine Arabic': 'apc_Arab', 'Modern Standard Arabic': 'arb_Arab', 'Modern Standard Arabic Romanized': 'arb_Latn', 'Najdi Arabic': 'ars_Arab', 'Moroccan Arabic': 'ary_Arab', 'Egyptian Arabic': 'arz_Arab', 'Assamese': 'asm_Beng', 'Asturian': 'ast_Latn', 'Awadhi': 'awa_Deva', 'Central Aymara': 'ayr_Latn', 'South Azerbaijani': 'azb_Arab', 'North Azerbaijani': 'azj_Latn', 'Bashkir': 'bak_Cyrl', 'Bambara': 'bam_Latn', 'Balinese': 'ban_Latn', 'Belarusian': 'bel_Cyrl', 'Bemba': 'bem_Latn', 'Bengali': 'ben_Beng', 'Bhojpuri': 'bho_Deva', 'Banjar Arabic': 'bjn_Arab', 'Banjar Latin': 'bjn_Latn', 'Standard Tibetan': 'bod_Tibt', 'Bosnian': 'bos_Latn', 'Buginese': 'bug_Latn', 'Bulgarian': 'bul_Cyrl', 'Catalan': 'cat_Latn', 'Cebuano': 'ceb_Latn', 'Czech': 'ces_Latn', 'Chokwe': 'cjk_Latn', 'Central Kurdish': 'ckb_Arab', 'Crimean Tatar': 'crh_Latn', 'Welsh': 'cym_Latn', 'Danish': 'dan_Latn', 'German': 'deu_Latn', 'Southwestern Dinka': 'dik_Latn', 'Dyula': 'dyu_Latn', 'Dzongkha': 'dzo_Tibt', 'Greek': 'ell_Grek', 'English': 'eng_Latn', 'Esperanto': 'epo_Latn', 'Estonian': 'est_Latn', 'Basque': 'eus_Latn', 'Ewe': 'ewe_Latn', 'Faroese': 'fao_Latn', 'Fijian': 'fij_Latn', 'Finnish': 'fin_Latn', 'Fon': 'fon_Latn', 'French': 'fra_Latn', 'Friulian': 'fur_Latn', 'Nigerian Fulfulde': 'fuv_Latn', 'Scottish Gaelic': 'gla_Latn', 'Irish': 'gle_Latn', 'Galician': 'glg_Latn', 'Guarani': 'grn_Latn', 'Gujarati': 'guj_Gujr', 'Haitian Creole': 'hat_Latn', 'Hausa': 'hau_Latn', 'Hebrew': 'heb_Hebr', 'Hindi': 'hin_Deva', 'Chhattisgarhi': 'hne_Deva', 'Croatian': 'hrv_Latn', 'Hungarian': 'hun_Latn', 'Armenian': 'hye_Armn', 'Igbo': 'ibo_Latn', 'Ilocano': 'ilo_Latn', 'Indonesian': 'ind_Latn', 'Icelandic': 'isl_Latn', 'Italian': 'ita_Latn', 'Javanese': 'jav_Latn', 'Japanese': 'jpn_Jpan', 'Kabyle': 'kab_Latn', 'Jingpho': 'kac_Latn', 'Kamba': 'kam_Latn', 'Kannada': 'kan_Knda', 'Kashmiri Arabic': 'kas_Arab', 'Kashmiri Devanagari': 'kas_Deva', 'Georgian': 'kat_Geor', 'Central Kanuri Arabic': 'knc_Arab', 'Central Kanuri Latin': 'knc_Latn', 'Kazakh': 'kaz_Cyrl', 'Kabiyè': 'kbp_Latn', 'Kabuverdianu': 'kea_Latn', 'Khmer': 'khm_Khmr', 'Kikuyu': 'kik_Latn', 'Kinyarwanda': 'kin_Latn', 'Kyrgyz': 'kir_Cyrl', 'Kimbundu': 'kmb_Latn', 'Northern Kurdish': 'kmr_Latn', 'Kikongo': 'kon_Latn', 'Korean': 'kor_Hang', 'Lao': 'lao_Laoo', 'Ligurian': 'lij_Latn', 'Limburgish': 'lim_Latn', 'Lingala': 'lin_Latn', 'Lithuanian': 'lit_Latn', 'Lombard': 'lmo_Latn', 'Latgalian': 'ltg_Latn', 'Luxembourgish': 'ltz_Latn', 'Luba-Kasai': 'lua_Latn', 'Ganda': 'lug_Latn', 'Luo': 
'luo_Latn', 'Mizo': 'lus_Latn', 'Standard Latvian': 'lvs_Latn', 'Magahi': 'mag_Deva', 'Maithili': 'mai_Deva', 'Malayalam': 'mal_Mlym', 'Marathi': 'mar_Deva', 'Minangkabau Arabic ': 'min_Arab', 'Minangkabau Latin': 'min_Latn', 'Macedonian': 'mkd_Cyrl', 'Plateau Malagasy': 'plt_Latn', 'Maltese': 'mlt_Latn', 'Meitei Bengali': 'mni_Beng', 'Halh Mongolian': 'khk_Cyrl', 'Mossi': 'mos_Latn', 'Maori': 'mri_Latn', 'Burmese': 'mya_Mymr', 'Dutch': 'nld_Latn', 'Norwegian Nynorsk': 'nno_Latn', 'Norwegian Bokmål': 'nob_Latn', 'Nepali': 'npi_Deva', 'Northern Sotho': 'nso_Latn', 'Nuer': 'nus_Latn', 'Nyanja': 'nya_Latn', 'Occitan': 'oci_Latn', 'West Central Oromo': 'gaz_Latn', 'Odia': 'ory_Orya', 'Pangasinan': 'pag_Latn', 'Eastern Panjabi': 'pan_Guru', 'Papiamento': 'pap_Latn', 'Western Persian': 'pes_Arab', 'Polish': 'pol_Latn', 'Portuguese': 'por_Latn', 'Dari': 'prs_Arab', 'Southern Pashto': 'pbt_Arab', 'Ayacucho Quechua': 'quy_Latn', 'Romanian': 'ron_Latn', 'Rundi': 'run_Latn', 'Russian': 'rus_Cyrl', 'Sango': 'sag_Latn', 'Sanskrit': 'san_Deva', 'Santali': 'sat_Olck', 'Sicilian': 'scn_Latn', 'Shan': 'shn_Mymr', 'Sinhala': 'sin_Sinh', 'Slovak': 'slk_Latn', 'Slovenian': 'slv_Latn', 'Samoan': 'smo_Latn', 'Shona': 'sna_Latn', 'Sindhi': 'snd_Arab', 'Somali': 'som_Latn', 'Southern Sotho': 'sot_Latn', 'Spanish': 'spa_Latn', 'Tosk Albanian': 'als_Latn', 'Sardinian': 'srd_Latn', 'Serbian': 'srp_Cyrl', 'Swati': 'ssw_Latn', 'Sundanese': 'sun_Latn', 'Swedish': 'swe_Latn', 'Swahili': 'swh_Latn', 'Silesian': 'szl_Latn', 'Tamil': 'tam_Taml', 'Tatar': 'tat_Cyrl', 'Telugu': 'tel_Telu', 'Tajik': 'tgk_Cyrl', 'Tagalog': 'tgl_Latn', 'Thai': 'tha_Thai', 'Tigrinya': 'tir_Ethi', 'Tamasheq Latin': 'taq_Latn', 'Tamasheq Tifinagh': 'taq_Tfng', 'Tok Pisin': 'tpi_Latn', 'Tswana': 'tsn_Latn', 'Tsonga': 'tso_Latn', 'Turkmen': 'tuk_Latn', 'Tumbuka': 'tum_Latn', 'Turkish': 'tur_Latn', 'Twi': 'twi_Latn', 'Central Atlas Tamazight': 'tzm_Tfng', 'Uyghur': 'uig_Arab', 'Ukrainian': 'ukr_Cyrl', 'Umbundu': 'umb_Latn', 'Urdu': 'urd_Arab', 'Northern Uzbek': 'uzn_Latn', 'Venetian': 'vec_Latn', 'Vietnamese': 'vie_Latn', 'Waray': 'war_Latn', 'Wolof': 'wol_Latn', 'Xhosa': 'xho_Latn', 'Eastern Yiddish': 'ydd_Hebr', 'Yoruba': 'yor_Latn', 'Yue Chinese': 'yue_Hant', 'Chinese Simplified': 'zho_Hans', 'Chinese Traditional': 'zho_Hant', 'Standard Malay': 'zsm_Latn', 'Zulu': 'zul_Latn', } class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : str = '''facebook/nllb-200-distilled-600M''' snake_case__ : Union[str, Any] = ( '''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should ''' '''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, ''' '''which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in ''' '''plain English, such as \'Romanian\', or \'Albanian\'. 
It returns the text translated in `tgt_lang`.''' ) snake_case__ : Optional[Any] = '''translator''' snake_case__ : Tuple = AutoTokenizer snake_case__ : Union[str, Any] = AutoModelForSeqaSeqLM snake_case__ : Dict = LANGUAGE_CODES snake_case__ : str = ['''text''', '''text''', '''text'''] snake_case__ : Tuple = ['''text'''] def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Tuple: if src_lang not in self.lang_to_code: raise ValueError(F"""{src_lang} is not a supported language.""" ) if tgt_lang not in self.lang_to_code: raise ValueError(F"""{tgt_lang} is not a supported language.""" ) a_ : str = self.lang_to_code[src_lang] a_ : Any = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( SCREAMING_SNAKE_CASE__ , return_tensors='pt' , src_lang=SCREAMING_SNAKE_CASE__ , tgt_lang=SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : Tuple ) -> Any: return self.model.generate(**SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[str] ) -> Dict: return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) UpperCAmelCase_ : List[str] = logging.getLogger(__name__) @dataclass(frozen=lowercase__ ) class SCREAMING_SNAKE_CASE__ : snake_case__ : str snake_case__ : str snake_case__ : Optional[str] = None snake_case__ : Optional[str] = None snake_case__ : Optional[str] = None @dataclass(frozen=lowercase__ ) class SCREAMING_SNAKE_CASE__ : snake_case__ : List[int] snake_case__ : Optional[List[int]] = None snake_case__ : Optional[List[int]] = None snake_case__ : Optional[Union[int, float]] = None snake_case__ : Optional[int] = None if is_torch_available(): import torch from torch.utils.data import Dataset class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : List[InputFeatures] def __init__( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : PreTrainedTokenizer , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : bool = False , ) -> List[Any]: a_ : Dict = hans_processors[task]() a_ : str = os.path.join( SCREAMING_SNAKE_CASE__ , 'cached_{}_{}_{}_{}'.format( 'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , ) , ) a_ : str = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) a_ , a_ : Optional[Any] = label_list[2], label_list[1] a_ : Optional[Any] = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
a_ : Dict = cached_features_file + '.lock' with FileLock(SCREAMING_SNAKE_CASE__ ): if os.path.exists(SCREAMING_SNAKE_CASE__ ) and not overwrite_cache: logger.info(F"""Loading features from cached file {cached_features_file}""" ) a_ : Optional[int] = torch.load(SCREAMING_SNAKE_CASE__ ) else: logger.info(F"""Creating features from dataset file at {data_dir}""" ) a_ : Optional[Any] = ( processor.get_dev_examples(SCREAMING_SNAKE_CASE__ ) if evaluate else processor.get_train_examples(SCREAMING_SNAKE_CASE__ ) ) logger.info('Training examples: %s' , len(SCREAMING_SNAKE_CASE__ ) ) a_ : Union[str, Any] = hans_convert_examples_to_features(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) logger.info('Saving features into cached file %s' , SCREAMING_SNAKE_CASE__ ) torch.save(self.features , SCREAMING_SNAKE_CASE__ ) def __len__( self : List[Any] ) -> Optional[Any]: return len(self.features ) def __getitem__( self : Any , SCREAMING_SNAKE_CASE__ : List[Any] ) -> InputFeatures: return self.features[i] def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]: return self.label_list if is_tf_available(): import tensorflow as tf class SCREAMING_SNAKE_CASE__ : snake_case__ : List[InputFeatures] def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : PreTrainedTokenizer , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] = 1_2_8 , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : bool = False , ) -> str: a_ : Any = hans_processors[task]() a_ : Union[str, Any] = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) a_ , a_ : List[Any] = label_list[2], label_list[1] a_ : Union[str, Any] = label_list a_ : Tuple = processor.get_dev_examples(SCREAMING_SNAKE_CASE__ ) if evaluate else processor.get_train_examples(SCREAMING_SNAKE_CASE__ ) a_ : Tuple = hans_convert_examples_to_features(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ): if ex_index % 1_0_0_0_0 == 0: logger.info('Writing example %d of %d' % (ex_index, len(SCREAMING_SNAKE_CASE__ )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) a_ : Tuple = tf.data.Dataset.from_generator( SCREAMING_SNAKE_CASE__ , ( { 'example_id': tf.intaa, 'input_ids': tf.intaa, 'attention_mask': tf.intaa, 'token_type_ids': tf.intaa, }, tf.intaa, ) , ( { 'example_id': tf.TensorShape([] ), 'input_ids': tf.TensorShape([None, None] ), 'attention_mask': tf.TensorShape([None, None] ), 'token_type_ids': tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: return self.dataset def __len__( self : Dict ) -> Tuple: return len(self.features ) def __getitem__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str ) -> InputFeatures: return self.features[i] def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]: return self.label_list class SCREAMING_SNAKE_CASE__ ( lowercase__ ): def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : Tuple ) -> Tuple: return self._create_examples(self._read_tsv(os.path.join(SCREAMING_SNAKE_CASE__ , 'heuristics_train_set.txt' ) ) , 'train' ) def 
SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[str]: return self._create_examples(self._read_tsv(os.path.join(SCREAMING_SNAKE_CASE__ , 'heuristics_evaluation_set.txt' ) ) , 'dev' ) def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: return ["contradiction", "entailment", "neutral"] def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple ) -> str: a_ : List[Any] = [] for i, line in enumerate(SCREAMING_SNAKE_CASE__ ): if i == 0: continue a_ : List[Any] = '%s-%s' % (set_type, line[0]) a_ : Tuple = line[5] a_ : Optional[int] = line[6] a_ : List[Any] = line[7][2:] if line[7].startswith('ex' ) else line[7] a_ : Union[str, Any] = line[0] examples.append(InputExample(guid=SCREAMING_SNAKE_CASE__ , text_a=SCREAMING_SNAKE_CASE__ , text_b=SCREAMING_SNAKE_CASE__ , label=SCREAMING_SNAKE_CASE__ , pairID=SCREAMING_SNAKE_CASE__ ) ) return examples def SCREAMING_SNAKE_CASE_ ( __A : List[InputExample] , __A : List[str] , __A : int , __A : PreTrainedTokenizer , ) -> Union[str, Any]: """simple docstring""" a_ : Tuple = {label: i for i, label in enumerate(__A )} a_ : List[str] = [] for ex_index, example in tqdm.tqdm(enumerate(__A ) , desc='convert examples to features' ): if ex_index % 1_00_00 == 0: logger.info('Writing example %d' % (ex_index) ) a_ : Optional[int] = tokenizer( example.text_a , example.text_b , add_special_tokens=__A , max_length=__A , padding='max_length' , truncation=__A , return_overflowing_tokens=__A , ) a_ : Union[str, Any] = label_map[example.label] if example.label in label_map else 0 a_ : Union[str, Any] = int(example.pairID ) features.append(InputFeatures(**__A , label=__A , pairID=__A ) ) for i, example in enumerate(examples[:5] ): logger.info('*** Example ***' ) logger.info(F"""guid: {example}""" ) logger.info(F"""features: {features[i]}""" ) return features UpperCAmelCase_ : List[str] = { 'hans': 3, } UpperCAmelCase_ : Tuple = { 'hans': HansProcessor, }
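# Hedged usage sketch (not part of the dataset code above): this is roughly what the feature
# converter does for one HANS premise/hypothesis pair. The checkpoint name, max_length and the
# example sentences are arbitrary illustrative choices.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
encoding = tokenizer(
    "The doctor near the actor danced.",   # premise (text_a)
    "The actor danced.",                   # hypothesis (text_b)
    add_special_tokens=True,
    max_length=128,
    padding="max_length",
    truncation=True,
)
print(len(encoding["input_ids"]))  # 128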
32
UpperCAmelCase_ : Optional[int] = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] UpperCAmelCase_ : str = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] UpperCAmelCase_ : str = { 0: 'Sunday', 1: 'Monday', 2: 'Tuesday', 3: 'Wednesday', 4: 'Thursday', 5: 'Friday', 6: 'Saturday', } def SCREAMING_SNAKE_CASE_ ( __A : int , __A : int , __A : int ) -> str: """simple docstring""" assert len(str(__A ) ) > 2, "year should be in YYYY format" assert 1 <= month <= 12, "month should be between 1 to 12" assert 1 <= day <= 31, "day should be between 1 to 31" # Doomsday algorithm: a_ : List[str] = year // 1_00 a_ : Optional[int] = (5 * (century % 4) + 2) % 7 a_ : List[str] = year % 1_00 a_ : str = centurian % 12 a_ : List[str] = ( (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor ) % 7 a_ : Any = ( DOOMSDAY_NOT_LEAP[month - 1] if (year % 4 != 0) or (centurian == 0 and (year % 4_00) != 0) else DOOMSDAY_LEAP[month - 1] ) a_ : Any = (dooms_day + day - day_anchor) % 7 return WEEK_DAY_NAMES[week_day] if __name__ == "__main__": import doctest doctest.testmod()
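# Hedged worked example of the Doomsday steps above for 24 Oct 2020 (the readable names and the
# chosen date are illustrative, not the identifiers used in the snippet); the result is
# cross-checked against the standard library.
import datetime

year, month, day = 2020, 10, 24
century_anchor = (5 * ((year // 100) % 4) + 2) % 7            # 2 -> Tuesday anchor for 2000-2099
centurian = year % 100                                        # 20
dooms_day = (centurian // 12 + centurian % 12 + (centurian % 12) // 4 + century_anchor) % 7  # 6
day_anchor = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5][month - 1]  # leap-year table, October -> 3
week_day = (dooms_day + day - day_anchor) % 7                 # (6 + 24 - 3) % 7 = 6
names = {0: "Sunday", 1: "Monday", 2: "Tuesday", 3: "Wednesday", 4: "Thursday", 5: "Friday", 6: "Saturday"}
assert names[week_day] == datetime.date(year, month, day).strftime("%A")  # Saturday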
32
1
def SCREAMING_SNAKE_CASE_ ( __A : int = 10_00 ) -> int: """simple docstring""" a_ : Union[str, Any] = 2**power a_ : Tuple = str(__A ) a_ : int = list(__A ) a_ : Optional[Any] = 0 for i in list_num: sum_of_num += int(__A ) return sum_of_num if __name__ == "__main__": UpperCAmelCase_ : Optional[int] = int(input('Enter the power of 2: ').strip()) print('2 ^ ', power, ' = ', 2**power) UpperCAmelCase_ : Dict = solution(power) print('Sum of the digits is: ', result)
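# Equivalent one-liner sketch of the digit-sum computation above (the power values are
# arbitrary examples): the digit sum of 2**15 = 32768 is 3 + 2 + 7 + 6 + 8 = 26.
def digit_sum(power: int) -> int:
    return sum(int(digit) for digit in str(2 ** power))

assert digit_sum(15) == 26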
32
import math import flax.linen as nn import jax.numpy as jnp def SCREAMING_SNAKE_CASE_ ( __A : jnp.ndarray , __A : int , __A : float = 1 , __A : float = 1 , __A : float = 1.0e4 , __A : bool = False , __A : float = 1.0 , ) -> jnp.ndarray: """simple docstring""" assert timesteps.ndim == 1, "Timesteps should be a 1d-array" assert embedding_dim % 2 == 0, F"""Embedding dimension {embedding_dim} should be even""" a_ : int = float(embedding_dim // 2 ) a_ : str = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift) a_ : Optional[int] = min_timescale * jnp.exp(jnp.arange(__A , dtype=jnp.floataa ) * -log_timescale_increment ) a_ : Optional[int] = jnp.expand_dims(__A , 1 ) * jnp.expand_dims(__A , 0 ) # scale embeddings a_ : str = scale * emb if flip_sin_to_cos: a_ : str = jnp.concatenate([jnp.cos(__A ), jnp.sin(__A )] , axis=1 ) else: a_ : Any = jnp.concatenate([jnp.sin(__A ), jnp.cos(__A )] , axis=1 ) a_ : Optional[int] = jnp.reshape(__A , [jnp.shape(__A )[0], embedding_dim] ) return signal class SCREAMING_SNAKE_CASE__ ( nn.Module ): snake_case__ : int = 32 snake_case__ : jnp.dtype = jnp.floataa @nn.compact def __call__( self : Tuple , SCREAMING_SNAKE_CASE__ : str ) -> List[str]: a_ : Optional[Any] = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='linear_1' )(SCREAMING_SNAKE_CASE__ ) a_ : Tuple = nn.silu(SCREAMING_SNAKE_CASE__ ) a_ : str = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='linear_2' )(SCREAMING_SNAKE_CASE__ ) return temb class SCREAMING_SNAKE_CASE__ ( nn.Module ): snake_case__ : int = 32 snake_case__ : bool = False snake_case__ : float = 1 @nn.compact def __call__( self : str , SCREAMING_SNAKE_CASE__ : int ) -> Tuple: return get_sinusoidal_embeddings( SCREAMING_SNAKE_CASE__ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
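# Hedged NumPy re-statement of the sinusoidal embedding helper above (the function name and the
# chosen sizes are illustrative): half of the output dimensions carry sin terms and half carry
# cos terms, at geometrically spaced frequencies.
import math
import numpy as np

def sinusoidal_embedding_sketch(timesteps, embedding_dim, min_timescale=1.0, max_timescale=1.0e4, freq_shift=1.0):
    num_timescales = embedding_dim // 2
    log_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * np.exp(np.arange(num_timescales) * -log_increment)
    args = timesteps[:, None] * inv_timescales[None, :]
    return np.concatenate([np.sin(args), np.cos(args)], axis=1)

emb = sinusoidal_embedding_sketch(np.arange(4, dtype=np.float32), 8)
print(emb.shape)  # (4, 8)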
32
1
import copy import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase_ : List[str] = logging.get_logger(__name__) UpperCAmelCase_ : Union[str, Any] = { 'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json', 'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json', 'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json', } class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : List[str] = '''owlvit_text_model''' def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Tuple=4_9_4_0_8 , SCREAMING_SNAKE_CASE__ : Optional[int]=5_1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2_0_4_8 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1_2 , SCREAMING_SNAKE_CASE__ : List[str]=8 , SCREAMING_SNAKE_CASE__ : str=1_6 , SCREAMING_SNAKE_CASE__ : Any="quick_gelu" , SCREAMING_SNAKE_CASE__ : Any=1E-5 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.0 , SCREAMING_SNAKE_CASE__ : Tuple=0.02 , SCREAMING_SNAKE_CASE__ : int=1.0 , SCREAMING_SNAKE_CASE__ : Optional[int]=0 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=4_9_4_0_6 , SCREAMING_SNAKE_CASE__ : Dict=4_9_4_0_7 , **SCREAMING_SNAKE_CASE__ : str , ) -> Union[str, Any]: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = vocab_size a_ : List[Any] = hidden_size a_ : Dict = intermediate_size a_ : Dict = num_hidden_layers a_ : List[str] = num_attention_heads a_ : Any = max_position_embeddings a_ : str = hidden_act a_ : Optional[int] = layer_norm_eps a_ : Tuple = attention_dropout a_ : Any = initializer_range a_ : Optional[Any] = initializer_factor @classmethod def SCREAMING_SNAKE_CASE ( cls : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> "PretrainedConfig": cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE__ ) a_ , a_ : Optional[Any] = cls.get_config_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) # get the text config dict if we are loading from OwlViTConfig if config_dict.get('model_type' ) == "owlvit": a_ : Union[str, Any] = config_dict['text_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ F"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Optional[Any] = '''owlvit_vision_model''' def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : List[str]=7_6_8 , SCREAMING_SNAKE_CASE__ : Dict=3_0_7_2 , SCREAMING_SNAKE_CASE__ : int=1_2 , SCREAMING_SNAKE_CASE__ : Optional[int]=1_2 , SCREAMING_SNAKE_CASE__ : Optional[int]=3 , SCREAMING_SNAKE_CASE__ : str=7_6_8 , SCREAMING_SNAKE_CASE__ : List[str]=3_2 , SCREAMING_SNAKE_CASE__ : Any="quick_gelu" , SCREAMING_SNAKE_CASE__ : Tuple=1E-5 , SCREAMING_SNAKE_CASE__ : List[Any]=0.0 , SCREAMING_SNAKE_CASE__ : int=0.02 , SCREAMING_SNAKE_CASE__ : Tuple=1.0 , **SCREAMING_SNAKE_CASE__ : List[Any] , ) -> Optional[Any]: super().__init__(**SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = hidden_size a_ : Optional[Any] = intermediate_size a_ : Optional[Any] = num_hidden_layers a_ : Optional[Any] = num_attention_heads a_ : List[str] = num_channels a_ : Tuple = image_size a_ : str = patch_size a_ : str = hidden_act a_ : Dict = layer_norm_eps a_ : List[str] = attention_dropout a_ : List[str] = initializer_range a_ : List[str] = initializer_factor @classmethod def SCREAMING_SNAKE_CASE ( cls : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> "PretrainedConfig": cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE__ ) a_ , a_ : Optional[Any] = cls.get_config_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) # get the vision config dict if we are loading from OwlViTConfig if config_dict.get('model_type' ) == "owlvit": a_ : Any = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Optional[Any] = '''owlvit''' snake_case__ : Dict = True def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : Dict=5_1_2 , SCREAMING_SNAKE_CASE__ : Dict=2.6592 , SCREAMING_SNAKE_CASE__ : List[str]=True , **SCREAMING_SNAKE_CASE__ : List[str] , ) -> Any: super().__init__(**SCREAMING_SNAKE_CASE__ ) if text_config is None: a_ : Optional[int] = {} logger.info('text_config is None. Initializing the OwlViTTextConfig with default values.' ) if vision_config is None: a_ : int = {} logger.info('vision_config is None. initializing the OwlViTVisionConfig with default values.' 
) a_ : Union[str, Any] = OwlViTTextConfig(**SCREAMING_SNAKE_CASE__ ) a_ : Any = OwlViTVisionConfig(**SCREAMING_SNAKE_CASE__ ) a_ : str = projection_dim a_ : Any = logit_scale_init_value a_ : List[Any] = return_dict a_ : Optional[int] = 1.0 @classmethod def SCREAMING_SNAKE_CASE ( cls : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE__ : List[str] ) -> "PretrainedConfig": cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE__ ) a_ , a_ : Any = cls.get_config_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) @classmethod def SCREAMING_SNAKE_CASE ( cls : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[Any]: a_ : List[Any] = {} a_ : List[str] = text_config a_ : Any = vision_config return cls.from_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: a_ : Optional[int] = copy.deepcopy(self.__dict__ ) a_ : Tuple = self.text_config.to_dict() a_ : Optional[Any] = self.vision_config.to_dict() a_ : Optional[Any] = self.__class__.model_type return output class SCREAMING_SNAKE_CASE__ ( lowercase__ ): @property def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('input_ids', {0: 'batch', 1: 'sequence'}), ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ('attention_mask', {0: 'batch', 1: 'sequence'}), ] ) @property def SCREAMING_SNAKE_CASE ( self : Any ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('logits_per_image', {0: 'batch'}), ('logits_per_text', {0: 'batch'}), ('text_embeds', {0: 'batch'}), ('image_embeds', {0: 'batch'}), ] ) @property def SCREAMING_SNAKE_CASE ( self : List[str] ) -> float: return 1E-4 def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : "ProcessorMixin" , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : Optional["TensorType"] = None , ) -> Mapping[str, Any]: a_ : int = super().generate_dummy_inputs( processor.tokenizer , batch_size=SCREAMING_SNAKE_CASE__ , seq_length=SCREAMING_SNAKE_CASE__ , framework=SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = super().generate_dummy_inputs( processor.image_processor , batch_size=SCREAMING_SNAKE_CASE__ , framework=SCREAMING_SNAKE_CASE__ ) return {**text_input_dict, **image_input_dict} @property def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: return 1_4
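# Hypothetical usage sketch, assuming the standard OwlViT config classes shipped with
# transformers (which the renamed classes above mirror); the chosen sizes are arbitrary.
from transformers import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig

text_cfg = OwlViTTextConfig(hidden_size=512, num_hidden_layers=12)
vision_cfg = OwlViTVisionConfig(hidden_size=768, patch_size=32)
config = OwlViTConfig.from_text_vision_configs(text_cfg.to_dict(), vision_cfg.to_dict())
print(config.text_config.hidden_size, config.vision_config.patch_size)  # 512 32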
32
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__) UpperCAmelCase_ : List[str] = OrderedDict( [ # Base model mapping ('albert', 'FlaxAlbertModel'), ('bart', 'FlaxBartModel'), ('beit', 'FlaxBeitModel'), ('bert', 'FlaxBertModel'), ('big_bird', 'FlaxBigBirdModel'), ('blenderbot', 'FlaxBlenderbotModel'), ('blenderbot-small', 'FlaxBlenderbotSmallModel'), ('clip', 'FlaxCLIPModel'), ('distilbert', 'FlaxDistilBertModel'), ('electra', 'FlaxElectraModel'), ('gpt-sw3', 'FlaxGPT2Model'), ('gpt2', 'FlaxGPT2Model'), ('gpt_neo', 'FlaxGPTNeoModel'), ('gptj', 'FlaxGPTJModel'), ('longt5', 'FlaxLongT5Model'), ('marian', 'FlaxMarianModel'), ('mbart', 'FlaxMBartModel'), ('mt5', 'FlaxMT5Model'), ('opt', 'FlaxOPTModel'), ('pegasus', 'FlaxPegasusModel'), ('regnet', 'FlaxRegNetModel'), ('resnet', 'FlaxResNetModel'), ('roberta', 'FlaxRobertaModel'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'), ('roformer', 'FlaxRoFormerModel'), ('t5', 'FlaxT5Model'), ('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'), ('vit', 'FlaxViTModel'), ('wav2vec2', 'FlaxWav2Vec2Model'), ('whisper', 'FlaxWhisperModel'), ('xglm', 'FlaxXGLMModel'), ('xlm-roberta', 'FlaxXLMRobertaModel'), ] ) UpperCAmelCase_ : str = OrderedDict( [ # Model for pre-training mapping ('albert', 'FlaxAlbertForPreTraining'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForPreTraining'), ('big_bird', 'FlaxBigBirdForPreTraining'), ('electra', 'FlaxElectraForPreTraining'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('t5', 'FlaxT5ForConditionalGeneration'), ('wav2vec2', 'FlaxWav2Vec2ForPreTraining'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) UpperCAmelCase_ : Dict = OrderedDict( [ # Model for Masked LM mapping ('albert', 'FlaxAlbertForMaskedLM'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForMaskedLM'), ('big_bird', 'FlaxBigBirdForMaskedLM'), ('distilbert', 'FlaxDistilBertForMaskedLM'), ('electra', 'FlaxElectraForMaskedLM'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) UpperCAmelCase_ : Optional[Any] = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ('bart', 'FlaxBartForConditionalGeneration'), ('blenderbot', 'FlaxBlenderbotForConditionalGeneration'), ('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'), ('encoder-decoder', 'FlaxEncoderDecoderModel'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('marian', 'FlaxMarianMTModel'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('pegasus', 'FlaxPegasusForConditionalGeneration'), ('t5', 'FlaxT5ForConditionalGeneration'), ] ) UpperCAmelCase_ : List[str] = OrderedDict( [ # Model for Image-classsification ('beit', 'FlaxBeitForImageClassification'), ('regnet', 'FlaxRegNetForImageClassification'), ('resnet', 'FlaxResNetForImageClassification'), ('vit', 'FlaxViTForImageClassification'), ] ) 
UpperCAmelCase_ : int = OrderedDict( [ ('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'), ] ) UpperCAmelCase_ : List[str] = OrderedDict( [ # Model for Causal LM mapping ('bart', 'FlaxBartForCausalLM'), ('bert', 'FlaxBertForCausalLM'), ('big_bird', 'FlaxBigBirdForCausalLM'), ('electra', 'FlaxElectraForCausalLM'), ('gpt-sw3', 'FlaxGPT2LMHeadModel'), ('gpt2', 'FlaxGPT2LMHeadModel'), ('gpt_neo', 'FlaxGPTNeoForCausalLM'), ('gptj', 'FlaxGPTJForCausalLM'), ('opt', 'FlaxOPTForCausalLM'), ('roberta', 'FlaxRobertaForCausalLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'), ('xglm', 'FlaxXGLMForCausalLM'), ('xlm-roberta', 'FlaxXLMRobertaForCausalLM'), ] ) UpperCAmelCase_ : List[str] = OrderedDict( [ # Model for Sequence Classification mapping ('albert', 'FlaxAlbertForSequenceClassification'), ('bart', 'FlaxBartForSequenceClassification'), ('bert', 'FlaxBertForSequenceClassification'), ('big_bird', 'FlaxBigBirdForSequenceClassification'), ('distilbert', 'FlaxDistilBertForSequenceClassification'), ('electra', 'FlaxElectraForSequenceClassification'), ('mbart', 'FlaxMBartForSequenceClassification'), ('roberta', 'FlaxRobertaForSequenceClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'), ('roformer', 'FlaxRoFormerForSequenceClassification'), ('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'), ] ) UpperCAmelCase_ : List[str] = OrderedDict( [ # Model for Question Answering mapping ('albert', 'FlaxAlbertForQuestionAnswering'), ('bart', 'FlaxBartForQuestionAnswering'), ('bert', 'FlaxBertForQuestionAnswering'), ('big_bird', 'FlaxBigBirdForQuestionAnswering'), ('distilbert', 'FlaxDistilBertForQuestionAnswering'), ('electra', 'FlaxElectraForQuestionAnswering'), ('mbart', 'FlaxMBartForQuestionAnswering'), ('roberta', 'FlaxRobertaForQuestionAnswering'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'), ('roformer', 'FlaxRoFormerForQuestionAnswering'), ('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'), ] ) UpperCAmelCase_ : Union[str, Any] = OrderedDict( [ # Model for Token Classification mapping ('albert', 'FlaxAlbertForTokenClassification'), ('bert', 'FlaxBertForTokenClassification'), ('big_bird', 'FlaxBigBirdForTokenClassification'), ('distilbert', 'FlaxDistilBertForTokenClassification'), ('electra', 'FlaxElectraForTokenClassification'), ('roberta', 'FlaxRobertaForTokenClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'), ('roformer', 'FlaxRoFormerForTokenClassification'), ('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'), ] ) UpperCAmelCase_ : Dict = OrderedDict( [ # Model for Multiple Choice mapping ('albert', 'FlaxAlbertForMultipleChoice'), ('bert', 'FlaxBertForMultipleChoice'), ('big_bird', 'FlaxBigBirdForMultipleChoice'), ('distilbert', 'FlaxDistilBertForMultipleChoice'), ('electra', 'FlaxElectraForMultipleChoice'), ('roberta', 'FlaxRobertaForMultipleChoice'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'), ('roformer', 'FlaxRoFormerForMultipleChoice'), ('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'), ] ) UpperCAmelCase_ : List[str] = OrderedDict( [ ('bert', 'FlaxBertForNextSentencePrediction'), ] ) UpperCAmelCase_ : Dict = OrderedDict( [ ('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ] ) UpperCAmelCase_ : Union[str, Any] = OrderedDict( [ ('whisper', 'FlaxWhisperForAudioClassification'), ] ) UpperCAmelCase_ : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, 
FLAX_MODEL_MAPPING_NAMES) UpperCAmelCase_ : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) UpperCAmelCase_ : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) UpperCAmelCase_ : List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) UpperCAmelCase_ : int = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) UpperCAmelCase_ : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) UpperCAmelCase_ : Dict = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ : Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) UpperCAmelCase_ : List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ : int = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) UpperCAmelCase_ : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) UpperCAmelCase_ : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) UpperCAmelCase_ : Optional[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : List[Any] = FLAX_MODEL_MAPPING UpperCAmelCase_ : Tuple = auto_class_update(FlaxAutoModel) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Any = FLAX_MODEL_FOR_PRETRAINING_MAPPING UpperCAmelCase_ : Optional[Any] = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : List[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING UpperCAmelCase_ : Optional[Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Optional[Any] = FLAX_MODEL_FOR_MASKED_LM_MAPPING UpperCAmelCase_ : Union[str, Any] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Tuple = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING UpperCAmelCase_ : Optional[int] = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base' ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Tuple = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING UpperCAmelCase_ : Optional[Any] = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc='sequence classification' ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Tuple = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING UpperCAmelCase_ : str = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : List[str] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING UpperCAmelCase_ : Tuple = auto_class_update( FlaxAutoModelForTokenClassification, head_doc='token classification' ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Dict = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING UpperCAmelCase_ : int = 
auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Optional[int] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING UpperCAmelCase_ : Dict = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction' ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING UpperCAmelCase_ : str = auto_class_update( FlaxAutoModelForImageClassification, head_doc='image classification' ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Optional[Any] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase_ : Tuple = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Optional[int] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING UpperCAmelCase_ : Union[str, Any] = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling' )
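# Hypothetical usage sketch for the lazy auto classes defined above (the checkpoint name is an
# arbitrary example that ships Flax weights): the config type resolves to FlaxBertModel here.
from transformers import AutoTokenizer, FlaxAutoModel

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = FlaxAutoModel.from_pretrained("bert-base-uncased")
inputs = tokenizer("a short example sentence", return_tensors="np")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, 768)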
32
1
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets UpperCAmelCase_ : Union[str, Any] = datasets.logging.get_logger(__name__) UpperCAmelCase_ : Dict = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n' UpperCAmelCase_ : str = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. 
Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n' UpperCAmelCase_ : int = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n' def SCREAMING_SNAKE_CASE_ ( __A : Optional[int] , __A : List[str] , __A : List[Any]=False , __A : Tuple=False , __A : Union[str, Any]=True , __A : List[str]=False , __A : Optional[int]="dummy_doc" ) -> Any: """simple docstring""" a_ : Any = {doc: key_lines} a_ : List[Any] = {doc: sys_lines} a_ : Union[str, Any] = {} a_ : int = 0 a_ : List[Any] = 0 a_ : Union[str, Any] = 0 a_ : Union[str, Any] = 0 a_ : int = 0 a_ : Optional[int] = 0 a_ , a_ : Optional[int] = reader.get_doc_mentions(__A , key_doc_lines[doc] , __A ) key_singletons_num += singletons_num if NP_only or min_span: a_ : List[Any] = reader.set_annotated_parse_trees(__A , key_doc_lines[doc] , __A , __A ) a_ , a_ : List[str] = reader.get_doc_mentions(__A , sys_doc_lines[doc] , __A ) sys_singletons_num += singletons_num if NP_only or min_span: a_ : int = reader.set_annotated_parse_trees(__A , key_doc_lines[doc] , __A , __A ) if remove_nested: a_ , a_ : Union[str, Any] = reader.remove_nested_coref_mentions(__A , __A ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters a_ , a_ : Union[str, Any] = reader.remove_nested_coref_mentions(__A , __A ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters a_ : Any = reader.get_mention_assignments(__A , __A ) a_ : List[str] = reader.get_mention_assignments(__A , __A ) a_ : Dict = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( 'Number of removed nested coreferring mentions in the key ' F"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" ) logger.info( 'Number of resulting singleton clusters in the key ' F"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" ) if not keep_singletons: logger.info( F"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """ 'files, respectively' ) return doc_coref_infos def SCREAMING_SNAKE_CASE_ ( __A : List[Any] , __A : str , __A : str , __A : List[str] , __A : Any , __A : Optional[int] , __A : Dict ) -> Optional[int]: """simple docstring""" a_ : Tuple = get_coref_infos(__A , __A , 
__A , __A , __A , __A ) a_ : Optional[int] = {} a_ : Union[str, Any] = 0 a_ : str = 0 for name, metric in metrics: a_ , a_ , a_ : Any = evaluator.evaluate_documents(__A , __A , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({F"""{name}/recall""": recall, F"""{name}/precision""": precision, F"""{name}/f1""": fa} ) logger.info( name.ljust(10 ) , F"""Recall: {recall * 1_00:.2f}""" , F""" Precision: {precision * 1_00:.2f}""" , F""" F1: {fa * 1_00:.2f}""" , ) if conll_subparts_num == 3: a_ : List[Any] = (conll / 3) * 1_00 logger.info(F"""CoNLL score: {conll:.2f}""" ) output_scores.update({'conll_score': conll} ) return output_scores def SCREAMING_SNAKE_CASE_ ( __A : Tuple ) -> Union[str, Any]: """simple docstring""" a_ : Union[str, Any] = False for line in key_lines: if not line.startswith('#' ): if len(line.split() ) > 6: a_ : str = line.split()[5] if not parse_col == "-": a_ : Optional[int] = True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE__ ( datasets.Metric ): def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' ) ), 'references': datasets.Sequence(datasets.Value('string' ) ), } ) , codebase_urls=['https://github.com/ns-moosavi/coval'] , reference_urls=[ 'https://github.com/ns-moosavi/coval', 'https://www.aclweb.org/anthology/P16-1060', 'http://www.conll.cemantix.org/2012/data.html', ] , ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : str=False , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : List[str]=False ) -> List[str]: a_ : str = [ ('mentions', evaluator.mentions), ('muc', evaluator.muc), ('bcub', evaluator.b_cubed), ('ceafe', evaluator.ceafe), ('lea', evaluator.lea), ] if min_span: a_ : Tuple = util.check_gold_parse_annotation(SCREAMING_SNAKE_CASE__ ) if not has_gold_parse: raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.' ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" a_ : List[str] = evaluate( key_lines=SCREAMING_SNAKE_CASE__ , sys_lines=SCREAMING_SNAKE_CASE__ , metrics=SCREAMING_SNAKE_CASE__ , NP_only=SCREAMING_SNAKE_CASE__ , remove_nested=SCREAMING_SNAKE_CASE__ , keep_singletons=SCREAMING_SNAKE_CASE__ , min_span=SCREAMING_SNAKE_CASE__ , ) return score
32
import json import os import unittest from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import ( VOCAB_FILES_NAMES, GPTSanJapaneseTokenizer, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class SCREAMING_SNAKE_CASE__ ( lowercase__ , unittest.TestCase ): snake_case__ : Any = GPTSanJapaneseTokenizer snake_case__ : Tuple = False snake_case__ : str = {'''do_clean_text''': False, '''add_prefix_space''': False} def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str: super().setUp() # fmt: off a_ : Union[str, Any] = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>'] # fmt: on a_ : int = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀 a_ : List[Any] = {'unk_token': '<unk>'} a_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) a_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) with open(self.emoji_file , 'w' ) as emoji_writer: emoji_writer.write(json.dumps(SCREAMING_SNAKE_CASE__ ) ) def SCREAMING_SNAKE_CASE ( self : List[str] , **SCREAMING_SNAKE_CASE__ : Tuple ) -> int: kwargs.update(self.special_tokens_map ) return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> int: a_ : Optional[int] = 'こんにちは、世界。 \nこんばんは、㔺界。😀' a_ : List[str] = 'こんにちは、世界。 \nこんばんは、世界。😀' return input_text, output_text def SCREAMING_SNAKE_CASE ( self : str , SCREAMING_SNAKE_CASE__ : int ) -> Dict: a_ , a_ : Union[str, Any] = self.get_input_output_texts(SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) a_ : Dict = tokenizer.decode(SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ ) return text, ids def SCREAMING_SNAKE_CASE ( self : Dict ) -> int: pass # TODO add if relevant def SCREAMING_SNAKE_CASE ( self : Dict ) -> Any: pass # TODO add if relevant def SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple: pass # TODO add if relevant def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]: a_ : List[str] = self.get_tokenizer() # Testing tokenization a_ : List[Any] = 'こんにちは、世界。 こんばんは、㔺界。' a_ : Optional[int] = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。'] a_ : Dict = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Testing conversion to ids without special tokens a_ : Tuple = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] a_ : List[Any] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Testing conversion to ids with special tokens a_ : int = tokens + [tokenizer.unk_token] a_ : int = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 1_9] a_ : Tuple = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict: a_ : Union[str, Any] = self.get_tokenizer() # Testing tokenization a_ : Dict = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。' a_ : List[Any] = 
'こんにちは、、、、世界。こんばんは、、、、世界。' a_ : Any = tokenizer.encode(SCREAMING_SNAKE_CASE__ ) a_ : Optional[Any] = tokenizer.decode(SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @slow def SCREAMING_SNAKE_CASE ( self : str ) -> Dict: a_ : Tuple = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) # Testing tokenization a_ : List[Any] = 'こんにちは、世界。' a_ : int = 'こんばんは、㔺界。😀' a_ : Dict = 'こんにちは、世界。こんばんは、世界。😀' a_ : Optional[int] = tokenizer.encode(prefix_text + input_text ) a_ : Any = tokenizer.encode('' , prefix_text=prefix_text + input_text ) a_ : Union[str, Any] = tokenizer.encode(SCREAMING_SNAKE_CASE__ , prefix_text=SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = tokenizer.decode(SCREAMING_SNAKE_CASE__ ) a_ : Tuple = tokenizer.decode(SCREAMING_SNAKE_CASE__ ) a_ : str = tokenizer.decode(SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @slow def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: a_ : Tuple = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) # Testing tokenization a_ : str = 'こんにちは、世界。' a_ : List[str] = 'こんばんは、㔺界。😀' a_ : str = len(tokenizer.encode(SCREAMING_SNAKE_CASE__ ) ) - 2 a_ : Tuple = len(tokenizer.encode(SCREAMING_SNAKE_CASE__ ) ) - 2 a_ : Optional[Any] = [1] + [0] * (len_prefix + len_text + 1) a_ : Optional[Any] = [1] * (len_prefix + len_text + 1) + [0] a_ : Tuple = [1] + [1] * (len_prefix) + [0] * (len_text + 1) a_ : List[str] = tokenizer(prefix_text + input_text ).token_type_ids a_ : Union[str, Any] = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids a_ : Any = tokenizer(SCREAMING_SNAKE_CASE__ , prefix_text=SCREAMING_SNAKE_CASE__ ).token_type_ids self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @slow def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: a_ : str = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) a_ : Optional[int] = tokenizer.encode('あンいワ' ) a_ : Dict = tokenizer.encode('' , prefix_text='あンいワ' ) a_ : Dict = tokenizer.encode('いワ' , prefix_text='あン' ) self.assertEqual(tokenizer.decode(SCREAMING_SNAKE_CASE__ ) , tokenizer.decode(SCREAMING_SNAKE_CASE__ ) ) self.assertEqual(tokenizer.decode(SCREAMING_SNAKE_CASE__ ) , tokenizer.decode(SCREAMING_SNAKE_CASE__ ) ) self.assertNotEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertNotEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token @slow def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: a_ : List[str] = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) a_ : Optional[Any] = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']] a_ : List[str] = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ ) a_ : Dict = tokenizer.batch_encode_plus(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ ) # fmt: off a_ : List[Any] = [[3_5_9_9_3, 8_6_4_0, 2_5_9_4_8, 3_5_9_9_8, 3_0_6_4_7, 3_5_6_7_5, 3_5_9_9_9, 3_5_9_9_9], [3_5_9_9_3, 1_0_3_8_2, 9_8_6_8, 3_5_9_9_8, 3_0_6_4_6, 9_4_5_9, 3_0_6_4_6, 3_5_6_7_5]] a_ : Any = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]] a_ : List[Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 
1]] # fmt: on self.assertListEqual(x_token.input_ids , SCREAMING_SNAKE_CASE__ ) self.assertListEqual(x_token.token_type_ids , SCREAMING_SNAKE_CASE__ ) self.assertListEqual(x_token.attention_mask , SCREAMING_SNAKE_CASE__ ) self.assertListEqual(x_token_a.input_ids , SCREAMING_SNAKE_CASE__ ) self.assertListEqual(x_token_a.token_type_ids , SCREAMING_SNAKE_CASE__ ) self.assertListEqual(x_token_a.attention_mask , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: # Intentionally convert some words to accommodate character fluctuations unique to Japanese pass def SCREAMING_SNAKE_CASE ( self : Dict ) -> int: # tokenizer has no padding token pass
32
1
def SCREAMING_SNAKE_CASE_ ( __A : list , __A : list ) -> float: """simple docstring""" _validate_point(__A ) _validate_point(__A ) if len(__A ) != len(__A ): raise ValueError('Both points must be in the same n-dimensional space' ) return float(sum(abs(a - b ) for a, b in zip(__A , __A ) ) ) def SCREAMING_SNAKE_CASE_ ( __A : list[float] ) -> None: """simple docstring""" if point: if isinstance(__A , __A ): for item in point: if not isinstance(__A , (int, float) ): a_ : Tuple = ( 'Expected a list of numbers as input, found ' F"""{type(__A ).__name__}""" ) raise TypeError(__A ) else: a_ : Union[str, Any] = F"""Expected a list of numbers as input, found {type(__A ).__name__}""" raise TypeError(__A ) else: raise ValueError('Missing an input' ) def SCREAMING_SNAKE_CASE_ ( __A : list , __A : list ) -> float: """simple docstring""" _validate_point(__A ) _validate_point(__A ) if len(__A ) != len(__A ): raise ValueError('Both points must be in the same n-dimensional space' ) return float(sum(abs(x - y ) for x, y in zip(__A , __A ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
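# Self-contained sketch of the distance computed above (the function name is illustrative):
# the Manhattan (L1) distance sums the absolute per-coordinate differences.
def manhattan_distance_sketch(point_a: list[float], point_b: list[float]) -> float:
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))

assert manhattan_distance_sketch([1, 1], [2, 2]) == 2.0
assert manhattan_distance_sketch([1.5, 2], [3, 4]) == 3.5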
32
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Union[str, Any] = ['''pixel_values'''] def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, int]] = None , SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Union[int, float] = 1 / 2_5_5 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , **SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> None: super().__init__(**SCREAMING_SNAKE_CASE__ ) a_ : str = size if size is not None else {'shortest_edge': 2_5_6} a_ : Any = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ ) a_ : Dict = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4} a_ : Optional[int] = get_size_dict(SCREAMING_SNAKE_CASE__ ) a_ : List[str] = do_resize a_ : Dict = size a_ : Optional[Any] = resample a_ : Optional[int] = do_center_crop a_ : Dict = crop_size a_ : int = do_rescale a_ : int = rescale_factor a_ : Tuple = do_normalize a_ : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN a_ : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Dict[str, int] , SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> np.ndarray: a_ : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. 
Got {size.keys()}""" ) a_ : Tuple = get_resize_output_image_size(SCREAMING_SNAKE_CASE__ , size=size['shortest_edge'] , default_to_square=SCREAMING_SNAKE_CASE__ ) return resize(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Dict[str, int] , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> np.ndarray: a_ : str = get_size_dict(SCREAMING_SNAKE_CASE__ ) return center_crop(SCREAMING_SNAKE_CASE__ , size=(size['height'], size['width']) , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> np.ndarray: return rescale(SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Union[float, List[float]] , SCREAMING_SNAKE_CASE__ : Union[float, List[float]] , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : List[str] , ) -> np.ndarray: return normalize(SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : ImageInput , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : PILImageResampling = None , SCREAMING_SNAKE_CASE__ : bool = None , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[float] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> Union[str, Any]: a_ : List[str] = do_resize if do_resize is not None else self.do_resize a_ : Dict = size if size is not None else self.size a_ : Dict = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = resample if resample is not None else self.resample a_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop a_ : int = crop_size if crop_size is not None else self.crop_size a_ : Optional[int] = get_size_dict(SCREAMING_SNAKE_CASE__ ) a_ : Dict = do_rescale if do_rescale is not None else self.do_rescale a_ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor a_ : Any = do_normalize if do_normalize is not None else self.do_normalize a_ : str = image_mean if image_mean is not None else self.image_mean a_ : Dict = image_std if image_std is not None else self.image_std a_ : Optional[int] = make_list_of_images(SCREAMING_SNAKE_CASE__ ) if not valid_images(SCREAMING_SNAKE_CASE__ ): raise ValueError( 'Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. a_ : Any = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for image in images] if do_resize: a_ : str = [self.resize(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ ) for image in images] if do_center_crop: a_ : int = [self.center_crop(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ ) for image in images] if do_rescale: a_ : Optional[Any] = [self.rescale(image=SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ ) for image in images] if do_normalize: a_ : List[Any] = [self.normalize(image=SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ ) for image in images] a_ : Dict = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images] a_ : Tuple = {'pixel_values': images} return BatchFeature(data=SCREAMING_SNAKE_CASE__ , tensor_type=SCREAMING_SNAKE_CASE__ )
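# Hypothetical usage sketch of the resize -> center-crop -> rescale -> normalize pipeline above
# (assumes a published checkpoint that ships a compatible image processor; the checkpoint name
# and the random image are arbitrary examples).
import numpy as np
from PIL import Image
from transformers import AutoImageProcessor

processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
image = Image.fromarray((np.random.rand(480, 640, 3) * 255).astype(np.uint8))
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # e.g. (1, 3, 224, 224)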
32
1
import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def SCREAMING_SNAKE_CASE_ ( __A : Optional[Any] ) -> Optional[Any]: """simple docstring""" a_ : Optional[int] = FileLock(str(tmpdir / 'foo.lock' ) ) a_ : str = FileLock(str(tmpdir / 'foo.lock' ) ) a_ : Optional[Any] = 0.01 with locka.acquire(): with pytest.raises(__A ): a_ : Tuple = time.time() locka.acquire(__A ) assert time.time() - _start > timeout def SCREAMING_SNAKE_CASE_ ( __A : str ) -> List[Any]: """simple docstring""" a_ : List[Any] = 'a' * 10_00 + '.lock' a_ : List[Any] = FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith('.lock' ) assert not locka._lock_file.endswith(__A ) assert len(os.path.basename(locka._lock_file ) ) <= 2_55 a_ : Tuple = FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(__A ): locka.acquire(0 )
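# Minimal usage sketch for the lock exercised by the tests above (the path is arbitrary):
# the lock is acquired on entering the `with` block and released when it exits.
from datasets.utils.filelock import FileLock

with FileLock("/tmp/example.lock"):
    pass  # only one process at a time reaches this critical section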
32
def SCREAMING_SNAKE_CASE_ ( __A : list[int] , __A : str ) -> list[int]: """simple docstring""" a_ : Any = int(__A ) # Initialize Result a_ : Tuple = [] # Traverse through all denomination for denomination in reversed(__A ): # Find denominations while int(__A ) >= int(__A ): total_value -= int(__A ) answer.append(__A ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": UpperCAmelCase_ : Union[str, Any] = [] UpperCAmelCase_ : Union[str, Any] = '0' if ( input('Do you want to enter your denominations ? (yY/n): ').strip().lower() == "y" ): UpperCAmelCase_ : List[Any] = int(input('Enter the number of denominations you want to add: ').strip()) for i in range(0, n): denominations.append(int(input(F'Denomination {i}: ').strip())) UpperCAmelCase_ : str = input('Enter the change you want to make in Indian Currency: ').strip() else: # All denominations of Indian Currency if user does not enter UpperCAmelCase_ : List[Any] = [1, 2, 5, 10, 20, 50, 100, 500, 2000] UpperCAmelCase_ : str = input('Enter the change you want to make: ').strip() if int(value) == 0 or int(value) < 0: print('The total value cannot be zero or negative.') else: print(F'Following is minimal change for {value}: ') UpperCAmelCase_ : Optional[Any] = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=' ')
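# Self-contained sketch of the greedy strategy above (the function name and the amount are
# illustrative): repeatedly take the largest denomination that still fits. Note the greedy
# choice is only guaranteed optimal for canonical coin systems such as the one used here.
def find_minimum_change_sketch(denominations: list[int], total_value: int) -> list[int]:
    answer = []
    for denomination in sorted(denominations, reverse=True):
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)
    return answer

assert find_minimum_change_sketch([1, 2, 5, 10, 20, 50, 100, 500, 2000], 93) == [50, 20, 20, 2, 1]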
32
1
import jax.numpy as jnp from ...utils import logging from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel from .configuration_mta import MTaConfig UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__) UpperCAmelCase_ : str = 'T5Config' def SCREAMING_SNAKE_CASE_ ( __A : jnp.array , __A : int , __A : int ) -> jnp.ndarray: """simple docstring""" a_ : Dict = jnp.zeros_like(__A ) a_ : Dict = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] ) a_ : str = shifted_input_ids.at[:, 0].set(__A ) a_ : int = jnp.where(shifted_input_ids == -1_00 , __A , __A ) return shifted_input_ids class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : str = '''mt5''' snake_case__ : List[Any] = MTaConfig class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : str = '''mt5''' snake_case__ : List[str] = MTaConfig class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Any = '''mt5''' snake_case__ : Union[str, Any] = MTaConfig
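The helper at the top of this file builds decoder inputs by shifting the labels one position to the right. A worked sketch with readable names (the function below mirrors the obfuscated helper's logic; it is an illustrative alias, not part of the file):

import jax.numpy as jnp

def shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id):
    # same steps as the helper above, with readable names
    shifted = jnp.zeros_like(input_ids)
    shifted = shifted.at[:, 1:].set(input_ids[:, :-1])
    shifted = shifted.at[:, 0].set(decoder_start_token_id)
    return jnp.where(shifted == -100, pad_token_id, shifted)

labels = jnp.array([[5, 6, -100, -100]])
print(shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=1))
# [[1 5 6 0]] : sequence moved one slot right, start token prepended, -100 label padding replaced by pad_token_id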
32
import flax.linen as nn import jax import jax.numpy as jnp class SCREAMING_SNAKE_CASE__ ( nn.Module ): snake_case__ : int snake_case__ : jnp.dtype = jnp.floataa def SCREAMING_SNAKE_CASE ( self : str ) -> int: a_ : Dict = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[int]: a_ , a_ , a_ , a_ : Union[str, Any] = hidden_states.shape a_ : List[str] = jax.image.resize( SCREAMING_SNAKE_CASE__ , shape=(batch, height * 2, width * 2, channels) , method='nearest' , ) a_ : Any = self.conv(SCREAMING_SNAKE_CASE__ ) return hidden_states class SCREAMING_SNAKE_CASE__ ( nn.Module ): snake_case__ : int snake_case__ : jnp.dtype = jnp.floataa def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: a_ : Optional[int] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self : Any , SCREAMING_SNAKE_CASE__ : int ) -> Optional[int]: # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim # hidden_states = jnp.pad(hidden_states, pad_width=pad) a_ : str = self.conv(SCREAMING_SNAKE_CASE__ ) return hidden_states class SCREAMING_SNAKE_CASE__ ( nn.Module ): snake_case__ : int snake_case__ : int = None snake_case__ : float = 0.0 snake_case__ : bool = None snake_case__ : jnp.dtype = jnp.floataa def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: a_ : List[str] = self.in_channels if self.out_channels is None else self.out_channels a_ : Optional[int] = nn.GroupNorm(num_groups=3_2 , epsilon=1E-5 ) a_ : Any = nn.Conv( SCREAMING_SNAKE_CASE__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) a_ : Optional[int] = nn.Dense(SCREAMING_SNAKE_CASE__ , dtype=self.dtype ) a_ : Union[str, Any] = nn.GroupNorm(num_groups=3_2 , epsilon=1E-5 ) a_ : int = nn.Dropout(self.dropout_prob ) a_ : Optional[Any] = nn.Conv( SCREAMING_SNAKE_CASE__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) a_ : List[str] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut a_ : List[Any] = None if use_nin_shortcut: a_ : Union[str, Any] = nn.Conv( SCREAMING_SNAKE_CASE__ , kernel_size=(1, 1) , strides=(1, 1) , padding='VALID' , dtype=self.dtype , ) def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any]=True ) -> int: a_ : List[Any] = hidden_states a_ : Any = self.norma(SCREAMING_SNAKE_CASE__ ) a_ : Any = nn.swish(SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = self.conva(SCREAMING_SNAKE_CASE__ ) a_ : int = self.time_emb_proj(nn.swish(SCREAMING_SNAKE_CASE__ ) ) a_ : List[str] = jnp.expand_dims(jnp.expand_dims(SCREAMING_SNAKE_CASE__ , 1 ) , 1 ) a_ : Optional[int] = hidden_states + temb a_ : List[str] = self.norma(SCREAMING_SNAKE_CASE__ ) a_ : Tuple = nn.swish(SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = self.dropout(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = self.conva(SCREAMING_SNAKE_CASE__ ) if self.conv_shortcut is not None: a_ : List[str] = self.conv_shortcut(SCREAMING_SNAKE_CASE__ ) return hidden_states + residual
32
1
import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class SCREAMING_SNAKE_CASE__ ( lowercase__ , unittest.TestCase ): # TODO: is there an appropriate internal test set? snake_case__ : Any = '''ssube/stable-diffusion-x4-upscaler-onnx''' def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : int=0 ) -> Tuple: a_ : Union[str, Any] = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ) a_ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict: a_ : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = self.get_dummy_inputs() a_ : int = pipe(**SCREAMING_SNAKE_CASE__ ).images a_ : Tuple = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 5_1_2, 5_1_2, 3) a_ : List[Any] = np.array( [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: a_ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) a_ : int = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : List[str] = self.get_dummy_inputs() a_ : List[str] = pipe(**SCREAMING_SNAKE_CASE__ ).images a_ : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) a_ : str = np.array( [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: a_ : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) a_ : Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = self.get_dummy_inputs() a_ : Dict = pipe(**SCREAMING_SNAKE_CASE__ ).images a_ : str = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) a_ : Optional[Any] = np.array( [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: a_ : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) a_ : int = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : 
Optional[Any] = self.get_dummy_inputs() a_ : Dict = pipe(**SCREAMING_SNAKE_CASE__ ).images a_ : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) a_ : int = np.array( [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: a_ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) a_ : Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = self.get_dummy_inputs() a_ : List[str] = pipe(**SCREAMING_SNAKE_CASE__ ).images a_ : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) a_ : Union[str, Any] = np.array( [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @property def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: a_ : List[str] = ort.SessionOptions() a_ : int = False return options def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple: a_ : str = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) a_ : int = init_image.resize((1_2_8, 1_2_8) ) # using the PNDM scheduler by default a_ : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained( 'ssube/stable-diffusion-x4-upscaler-onnx' , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Tuple = 'A fantasy landscape, trending on artstation' a_ : str = torch.manual_seed(0 ) a_ : List[str] = pipe( prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , num_inference_steps=1_0 , generator=SCREAMING_SNAKE_CASE__ , output_type='np' , ) a_ : Dict = output.images a_ : Any = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) a_ : str = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]: a_ : Dict = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) a_ : List[str] = init_image.resize((1_2_8, 1_2_8) ) a_ : Dict = LMSDiscreteScheduler.from_pretrained( 'ssube/stable-diffusion-x4-upscaler-onnx' , subfolder='scheduler' ) a_ : Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained( 'ssube/stable-diffusion-x4-upscaler-onnx' , scheduler=SCREAMING_SNAKE_CASE__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Any = 'A fantasy landscape, trending on artstation' a_ : Tuple = torch.manual_seed(0 ) a_ : Optional[Any] = pipe( prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , 
guidance_scale=7.5 , num_inference_steps=2_0 , generator=SCREAMING_SNAKE_CASE__ , output_type='np' , ) a_ : str = output.images a_ : List[Any] = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) a_ : Tuple = np.array( [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
32
import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. UpperCAmelCase_ : Dict = {'LayoutLMv2Config', 'LayoutLMv3Config'} @is_pipeline_test class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): snake_case__ : List[str] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING snake_case__ : Optional[Any] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: snake_case__ : str = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: snake_case__ : List[Any] = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: a_ : List[Any] = pipeline( task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' ) a_ : int = text_classifier('This is great !' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}] ) a_ : Tuple = text_classifier('This is great !' , top_k=2 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}] ) a_ : List[str] = text_classifier(['This is great !', 'This is bad'] , top_k=2 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [ [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}], [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}], ] , ) a_ : Tuple = text_classifier('This is great !' , top_k=1 ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}] ) # Legacy behavior a_ : Union[str, Any] = text_classifier('This is great !' , return_all_scores=SCREAMING_SNAKE_CASE__ ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}] ) a_ : List[str] = text_classifier('This is great !' , return_all_scores=SCREAMING_SNAKE_CASE__ ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}]] ) a_ : int = text_classifier(['This is great !', 'Something else'] , return_all_scores=SCREAMING_SNAKE_CASE__ ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [ [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}], [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}], ] , ) a_ : str = text_classifier(['This is great !', 'Something else'] , return_all_scores=SCREAMING_SNAKE_CASE__ ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [ {'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_0', 'score': 0.504}, ] , ) @require_torch def SCREAMING_SNAKE_CASE ( self : int ) -> Dict: import torch a_ : List[Any] = pipeline( task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' , device=torch.device('cpu' ) , ) a_ : Any = text_classifier('This is great !' 
) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}] ) @require_tf def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: a_ : List[str] = pipeline( task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='tf' ) a_ : Optional[int] = text_classifier('This is great !' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}] ) @slow @require_torch def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: a_ : List[str] = pipeline('text-classification' ) a_ : Dict = text_classifier('This is great !' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'POSITIVE', 'score': 1.0}] ) a_ : Union[str, Any] = text_classifier('This is bad !' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'NEGATIVE', 'score': 1.0}] ) a_ : Tuple = text_classifier('Birds are a type of animal' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'POSITIVE', 'score': 0.988}] ) @slow @require_tf def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]: a_ : Dict = pipeline('text-classification' , framework='tf' ) a_ : Optional[Any] = text_classifier('This is great !' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'POSITIVE', 'score': 1.0}] ) a_ : int = text_classifier('This is bad !' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'NEGATIVE', 'score': 1.0}] ) a_ : Optional[int] = text_classifier('Birds are a type of animal' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'POSITIVE', 'score': 0.988}] ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any: a_ : Optional[Any] = TextClassificationPipeline(model=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ ) return text_classifier, ["HuggingFace is in", "This is another test"] def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Union[str, Any]: a_ : List[str] = text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 a_ : Union[str, Any] = 'HuggingFace is in' a_ : int = text_classifier(SCREAMING_SNAKE_CASE__ ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}] ) self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() ) a_ : Union[str, Any] = ['HuggingFace is in ', 'Paris is in France'] a_ : int = text_classifier(SCREAMING_SNAKE_CASE__ ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}, {'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}] , ) self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() ) self.assertTrue(outputs[1]['label'] in model.config.idalabel.values() ) # Forcing to get all results with `top_k=None` # This is NOT the legacy format a_ : List[Any] = text_classifier(SCREAMING_SNAKE_CASE__ , top_k=SCREAMING_SNAKE_CASE__ ) a_ : Dict = len(model.config.idalabel.values() ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [[{'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}] * N, [{'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}] * N] , ) a_ : int = {'text': 
'HuggingFace is in ', 'text_pair': 'Paris is in France'} a_ : Optional[int] = text_classifier(SCREAMING_SNAKE_CASE__ ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , {'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )} , ) self.assertTrue(outputs['label'] in model.config.idalabel.values() ) # This might be used a text pair, but tokenizer + pipe interaction # makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this usage instead as it was outputting wrong outputs. a_ : Any = [['HuggingFace is in ', 'Paris is in France']] with self.assertRaises(SCREAMING_SNAKE_CASE__ ): text_classifier(SCREAMING_SNAKE_CASE__ ) # This used to be valid for doing text pairs # We're keeping it working because of backward compatibility a_ : Tuple = text_classifier([[['HuggingFace is in ', 'Paris is in France']]] ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}] , ) self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
32
1
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8_6_6_0_2_5_4])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """simple docstring"""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """simple docstring"""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """simple docstring"""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    """simple docstring"""
    axes = plt.gca()
    axes.set_aspect('equal')
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
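Each refinement step replaces every segment with four shorter ones, so the point count can be checked without plotting; a quick structural check against the reconstruction above:

assert len(iterate(INITIAL_VECTORS, 5)) == 3 * 4**5 + 1  # 3 initial segments -> 3072 segments -> 3073 points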
32
import jax.numpy as jnp from ...utils import logging from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel from .configuration_mta import MTaConfig UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__) UpperCAmelCase_ : str = 'T5Config' def SCREAMING_SNAKE_CASE_ ( __A : jnp.array , __A : int , __A : int ) -> jnp.ndarray: """simple docstring""" a_ : Dict = jnp.zeros_like(__A ) a_ : Dict = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] ) a_ : str = shifted_input_ids.at[:, 0].set(__A ) a_ : int = jnp.where(shifted_input_ids == -1_00 , __A , __A ) return shifted_input_ids class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : str = '''mt5''' snake_case__ : List[Any] = MTaConfig class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : str = '''mt5''' snake_case__ : List[str] = MTaConfig class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Any = '''mt5''' snake_case__ : Union[str, Any] = MTaConfig
32
1
from __future__ import annotations

from collections.abc import Generator


def sieve() -> Generator[int, None, None]:
    """simple docstring"""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e1_0) -> int:
    """simple docstring"""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
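A quick sanity check of the incremental sieve above (the dictionary assignments in the reconstruction are inferred from the standard algorithm, since the obfuscation dropped the assignment targets):

from itertools import islice

assert list(islice(sieve(), 10)) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]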
32
from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent UpperCAmelCase_ : Any = {'UserAgent': UserAgent().random} def SCREAMING_SNAKE_CASE_ ( __A : Optional[int] ) -> dict: """simple docstring""" a_ : Tuple = script.contents[0] a_ : int = json.loads(data[data.find('{"config"' ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class SCREAMING_SNAKE_CASE__ : def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[Any]: a_ : Tuple = F"""https://www.instagram.com/{username}/""" a_ : Optional[Any] = self.get_json() def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> dict: a_ : Any = requests.get(self.url , headers=SCREAMING_SNAKE_CASE__ ).text a_ : Dict = BeautifulSoup(SCREAMING_SNAKE_CASE__ , 'html.parser' ).find_all('script' ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self : Union[str, Any] ) -> str: return F"""{self.__class__.__name__}('{self.username}')""" def __str__( self : Optional[int] ) -> str: return F"""{self.fullname} ({self.username}) is {self.biography}""" @property def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str: return self.user_data["username"] @property def SCREAMING_SNAKE_CASE ( self : str ) -> str: return self.user_data["full_name"] @property def SCREAMING_SNAKE_CASE ( self : Any ) -> str: return self.user_data["biography"] @property def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: return self.user_data["business_email"] @property def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: return self.user_data["external_url"] @property def SCREAMING_SNAKE_CASE ( self : Dict ) -> int: return self.user_data["edge_followed_by"]["count"] @property def SCREAMING_SNAKE_CASE ( self : Any ) -> int: return self.user_data["edge_follow"]["count"] @property def SCREAMING_SNAKE_CASE ( self : str ) -> int: return self.user_data["edge_owner_to_timeline_media"]["count"] @property def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: return self.user_data["profile_pic_url_hd"] @property def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> bool: return self.user_data["is_verified"] @property def SCREAMING_SNAKE_CASE ( self : Any ) -> bool: return self.user_data["is_private"] def SCREAMING_SNAKE_CASE_ ( __A : str = "github" ) -> None: """simple docstring""" import os if os.environ.get('CI' ): return # test failing on GitHub Actions a_ : int = InstagramUser(__A ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , __A ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." assert instagram_user.number_of_posts > 1_50 assert instagram_user.number_of_followers > 12_00_00 assert instagram_user.number_of_followings > 15 assert instagram_user.email == "support@github.com" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith('https://instagram.' 
) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase_ : Union[str, Any] = InstagramUser('github') print(instagram_user) print(F'{instagram_user.number_of_posts = }') print(F'{instagram_user.number_of_followers = }') print(F'{instagram_user.number_of_followings = }') print(F'{instagram_user.email = }') print(F'{instagram_user.website = }') print(F'{instagram_user.profile_picture_url = }') print(F'{instagram_user.is_verified = }') print(F'{instagram_user.is_private = }')
32
1
def SCREAMING_SNAKE_CASE_(__A: list[int]) -> float:
    """simple docstring"""
    if not __A:  # Makes sure that the list is not empty
        raise ValueError('List is empty')
    average = sum(__A) / len(__A)  # Calculate the average
    return sum(abs(x - average) for x in __A) / len(__A)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
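A worked example of the mean absolute deviation computed above:

assert SCREAMING_SNAKE_CASE_([2, 4, 6, 8]) == 2.0  # mean is 5, deviations are 3, 1, 1, 3, so (3 + 1 + 1 + 3) / 4 = 2.0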
32
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Tuple = ['''image_processor''', '''tokenizer'''] snake_case__ : Union[str, Any] = '''CLIPImageProcessor''' snake_case__ : Dict = ('''CLIPTokenizer''', '''CLIPTokenizerFast''') def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : List[str]=None , **SCREAMING_SNAKE_CASE__ : int ) -> Any: a_ : List[Any] = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , SCREAMING_SNAKE_CASE__ , ) a_ : Tuple = kwargs.pop('feature_extractor' ) a_ : Tuple = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __call__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , **SCREAMING_SNAKE_CASE__ : str ) -> Optional[Any]: if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be none.' ) if text is not None: a_ : List[str] = self.tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) if images is not None: a_ : Dict = self.image_processor(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) if text is not None and images is not None: a_ : Dict = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE__ ) , tensor_type=SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Any , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[Any]: return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[Any]: return self.tokenizer.decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) @property def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: a_ : str = self.tokenizer.model_input_names a_ : Tuple = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def SCREAMING_SNAKE_CASE ( self : str ) -> str: warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , SCREAMING_SNAKE_CASE__ , ) return self.image_processor_class @property def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , SCREAMING_SNAKE_CASE__ , ) return self.image_processor
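The class above is the (obfuscated) CLIP processor, which wraps a tokenizer and an image processor behind one __call__. A usage sketch with the released transformers class (the checkpoint id and image URL are assumptions taken from common documentation examples):

from PIL import Image
import requests
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32')  # assumed public checkpoint
url = 'http://images.cocodataset.org/val2017/000000039769.jpg'            # assumed sample image
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(text=['a photo of a cat', 'a photo of a dog'], images=image, return_tensors='pt')
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']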
32
1
import json import os from pathlib import Path import pytest from datasets.download.download_config import DownloadConfig from datasets.download.download_manager import DownloadManager from datasets.utils.file_utils import hash_url_to_filename UpperCAmelCase_ : Union[str, Any] = 'http://www.mocksite.com/file1.txt' UpperCAmelCase_ : Optional[Any] = '"text": ["foo", "foo"]' UpperCAmelCase_ : Optional[int] = '6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8' class SCREAMING_SNAKE_CASE__ : snake_case__ : int = 200 snake_case__ : Any = {'''Content-Length''': '''100'''} snake_case__ : Optional[Any] = {} def SCREAMING_SNAKE_CASE ( self : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Any ) -> str: return [bytes(SCREAMING_SNAKE_CASE__ , 'utf-8' )] def SCREAMING_SNAKE_CASE_ ( *__A : Optional[int] , **__A : Tuple ) -> List[Any]: """simple docstring""" return MockResponse() @pytest.mark.parametrize('urls_type' , [str, list, dict] ) def SCREAMING_SNAKE_CASE_ ( __A : Optional[Any] , __A : Union[str, Any] , __A : Any ) -> Any: """simple docstring""" import requests monkeypatch.setattr(__A , 'request' , __A ) a_ : Dict = URL if issubclass(__A , __A ): a_ : Any = url elif issubclass(__A , __A ): a_ : Optional[int] = [url] elif issubclass(__A , __A ): a_ : Dict = {'train': url} a_ : Tuple = 'dummy' a_ : Optional[int] = 'downloads' a_ : Optional[int] = tmp_path a_ : List[Any] = DownloadConfig( cache_dir=os.path.join(__A , __A ) , use_etag=__A , ) a_ : str = DownloadManager(dataset_name=__A , download_config=__A ) a_ : Union[str, Any] = dl_manager.download(__A ) a_ : Union[str, Any] = urls for downloaded_paths in [downloaded_paths]: if isinstance(__A , __A ): a_ : Dict = [downloaded_paths] a_ : Dict = [urls] elif isinstance(__A , __A ): assert "train" in downloaded_paths.keys() a_ : List[str] = downloaded_paths.values() a_ : Optional[Any] = urls.values() assert downloaded_paths for downloaded_path, input_url in zip(__A , __A ): assert downloaded_path == dl_manager.downloaded_paths[input_url] a_ : List[Any] = Path(__A ) a_ : Optional[Any] = downloaded_path.parts assert parts[-1] == HASH assert parts[-2] == cache_subdir assert downloaded_path.exists() a_ : Any = downloaded_path.read_text() assert content == CONTENT a_ : str = downloaded_path.with_suffix('.json' ) assert metadata_downloaded_path.exists() a_ : Optional[Any] = json.loads(metadata_downloaded_path.read_text() ) assert metadata_content == {"url": URL, "etag": None} @pytest.mark.parametrize('paths_type' , [str, list, dict] ) def SCREAMING_SNAKE_CASE_ ( __A : Any , __A : Union[str, Any] , __A : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" a_ : List[str] = str(__A ) if issubclass(__A , __A ): a_ : Any = filename elif issubclass(__A , __A ): a_ : Tuple = [filename] elif issubclass(__A , __A ): a_ : Union[str, Any] = {'train': filename} a_ : int = 'dummy' a_ : Optional[int] = xz_file.parent a_ : Union[str, Any] = 'extracted' a_ : Dict = DownloadConfig( cache_dir=__A , use_etag=__A , ) a_ : str = DownloadManager(dataset_name=__A , download_config=__A ) a_ : Dict = dl_manager.extract(__A ) a_ : List[Any] = paths for extracted_paths in [extracted_paths]: if isinstance(__A , __A ): a_ : List[Any] = [extracted_paths] a_ : Union[str, Any] = [paths] elif isinstance(__A , __A ): assert "train" in extracted_paths.keys() a_ : Dict = extracted_paths.values() a_ : Optional[Any] = paths.values() assert extracted_paths for extracted_path, input_path in zip(__A , __A ): assert extracted_path == dl_manager.extracted_paths[input_path] a_ : int 
= Path(__A ) a_ : Dict = extracted_path.parts assert parts[-1] == hash_url_to_filename(__A , etag=__A ) assert parts[-2] == extracted_subdir assert extracted_path.exists() a_ : Any = extracted_path.read_text() a_ : str = text_file.read_text() assert extracted_file_content == expected_file_content def SCREAMING_SNAKE_CASE_ ( __A : Any , __A : Optional[Any] ) -> Optional[int]: """simple docstring""" assert path.endswith('.jsonl' ) for num_items, line in enumerate(__A , start=1 ): a_ : Tuple = json.loads(line.decode('utf-8' ) ) assert item.keys() == {"col_1", "col_2", "col_3"} assert num_items == 4 @pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] ) def SCREAMING_SNAKE_CASE_ ( __A : Optional[int] , __A : List[Any] ) -> Tuple: """simple docstring""" a_ : Union[str, Any] = request.getfixturevalue(__A ) a_ : Optional[int] = DownloadManager() for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(__A ) , start=1 ): _test_jsonl(__A , __A ) assert num_jsonl == 2 @pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] ) def SCREAMING_SNAKE_CASE_ ( __A : int , __A : List[Any] ) -> str: """simple docstring""" a_ : Any = request.getfixturevalue(__A ) a_ : Dict = DownloadManager() for num_tar, (path, file) in enumerate(dl_manager.iter_archive(__A ) , start=1 ): for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(__A ) , start=1 ): _test_jsonl(__A , __A ) assert num_tar == 1 assert num_jsonl == 2 def SCREAMING_SNAKE_CASE_ ( __A : Union[str, Any] ) -> str: """simple docstring""" a_ : str = DownloadManager() for num_file, file in enumerate(dl_manager.iter_files(__A ) , start=1 ): assert os.path.basename(__A ) == ("test.txt" if num_file == 1 else "train.txt") assert num_file == 2
32
from __future__ import annotations

solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """simple docstring"""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """simple docstring"""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """simple docstring"""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print('Q', end=' ')
            else:
                print('.', end=' ')
        print()


# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
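A smaller check against the reconstructed module above (solve() also prints each board it finds; the 8x8 run at import time yields the classic 92 solutions):

solution.clear()
small_board = [[0 for _ in range(4)] for _ in range(4)]
solve(small_board, 0)
assert len(solution) == 2  # the 4x4 puzzle has exactly two solutions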
32
1
UpperCAmelCase_ : Any = {
    'Pillow': 'Pillow',
    'accelerate': 'accelerate>=0.11.0',
    'compel': 'compel==0.1.8',
    'black': 'black~=23.1',
    'datasets': 'datasets',
    'filelock': 'filelock',
    'flax': 'flax>=0.4.1',
    'hf-doc-builder': 'hf-doc-builder>=0.3.0',
    'huggingface-hub': 'huggingface-hub>=0.13.2',
    'requests-mock': 'requests-mock==1.10.0',
    'importlib_metadata': 'importlib_metadata',
    'invisible-watermark': 'invisible-watermark',
    'isort': 'isort>=5.5.4',
    'jax': 'jax>=0.2.8,!=0.3.2',
    'jaxlib': 'jaxlib>=0.1.65',
    'Jinja2': 'Jinja2',
    'k-diffusion': 'k-diffusion>=0.0.12',
    'torchsde': 'torchsde',
    'note_seq': 'note_seq',
    'librosa': 'librosa',
    'numpy': 'numpy',
    'omegaconf': 'omegaconf',
    'parameterized': 'parameterized',
    'protobuf': 'protobuf>=3.20.3,<4',
    'pytest': 'pytest',
    'pytest-timeout': 'pytest-timeout',
    'pytest-xdist': 'pytest-xdist',
    'ruff': 'ruff>=0.0.241',
    'safetensors': 'safetensors',
    'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
    'scipy': 'scipy',
    'onnx': 'onnx',
    'regex': 'regex!=2019.12.17',
    'requests': 'requests',
    'tensorboard': 'tensorboard',
    'torch': 'torch>=1.4',
    'torchvision': 'torchvision',
    'transformers': 'transformers>=4.25.1',
    'urllib3': 'urllib3<=2.0.0',
}
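The table above appears to map bare package names to pinned requirement strings (the pattern used by dependency-version tables in Hugging Face libraries); a quick lookup against it:

assert UpperCAmelCase_['torch'] == 'torch>=1.4'
assert UpperCAmelCase_['transformers'] == 'transformers>=4.25.1'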
32
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    """simple docstring"""
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = 'Arg --no_{0} is no longer used, please use --no-{0} instead.'
        begin_error_msg = ' '.join(str(e).split(' ')[:-1])
        full_error_msg = ''
        depreciated_args = eval(str(e).split(' ')[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
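The same benchmark can also be driven programmatically instead of through the CLI parser; a hedged sketch (the field names passed below are assumptions, check TensorFlowBenchmarkArguments for the exact dataclass fields, and TensorFlow must be installed):

from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=['bert-base-uncased'],  # assumed field names and model id
    batch_sizes=[8],
    sequence_lengths=[128],
)
results = TensorFlowBenchmark(args=args).run()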
32
1
from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import KarrasVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : UNetaDModel snake_case__ : KarrasVeScheduler def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : UNetaDModel , SCREAMING_SNAKE_CASE__ : KarrasVeScheduler ) -> Optional[Any]: super().__init__() self.register_modules(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ ) @torch.no_grad() def __call__( self : Dict , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : int = 5_0 , SCREAMING_SNAKE_CASE__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , SCREAMING_SNAKE_CASE__ : Optional[str] = "pil" , SCREAMING_SNAKE_CASE__ : bool = True , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> Union[Tuple, ImagePipelineOutput]: a_ : Tuple = self.unet.config.sample_size a_ : List[str] = (batch_size, 3, img_size, img_size) a_ : int = self.unet # sample x_0 ~ N(0, sigma_0^2 * I) a_ : str = randn_tensor(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=self.device ) * self.scheduler.init_noise_sigma self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ ) for t in self.progress_bar(self.scheduler.timesteps ): # here sigma_t == t_i from the paper a_ : str = self.scheduler.schedule[t] a_ : List[str] = self.scheduler.schedule[t - 1] if t > 0 else 0 # 1. Select temporarily increased noise level sigma_hat # 2. Add new noise to move from sample_i to sample_hat a_ , a_ : Union[str, Any] = self.scheduler.add_noise_to_input(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ ) # 3. Predict the noise residual given the noise magnitude `sigma_hat` # The model inputs and output are adjusted by following eq. (213) in [1]. a_ : Optional[Any] = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample # 4. Evaluate dx/dt at sigma_hat # 5. Take Euler step from sigma to sigma_prev a_ : Tuple = self.scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if sigma_prev != 0: # 6. Apply 2nd order correction # The model inputs and output are adjusted by following eq. (213) in [1]. a_ : Tuple = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample a_ : Union[str, Any] = self.scheduler.step_correct( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , step_output.prev_sample , step_output['derivative'] , ) a_ : List[str] = step_output.prev_sample a_ : Optional[Any] = (sample / 2 + 0.5).clamp(0 , 1 ) a_ : int = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": a_ : Any = self.numpy_to_pil(SCREAMING_SNAKE_CASE__ ) if not return_dict: return (image,) return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE__ )
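The class above is the (obfuscated) Karras VE diffusion pipeline; a minimal inference sketch, assuming a compatible unconditional checkpoint (the checkpoint id below is an assumption, not taken from this file):

import torch
from diffusers import KarrasVePipeline

pipe = KarrasVePipeline.from_pretrained('google/ncsnpp-celebahq-256')  # assumed checkpoint id
generator = torch.manual_seed(0)
image = pipe(batch_size=1, num_inference_steps=50, generator=generator).images[0]
image.save('karras_ve_sample.png')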
32
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class SCREAMING_SNAKE_CASE__ ( lowercase__ , unittest.TestCase ): snake_case__ : Optional[Any] = TextToVideoSDPipeline snake_case__ : Optional[int] = TEXT_TO_IMAGE_PARAMS snake_case__ : str = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. snake_case__ : Optional[Any] = frozenset( [ '''num_inference_steps''', '''generator''', '''latents''', '''return_dict''', '''callback''', '''callback_steps''', ] ) def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: torch.manual_seed(0 ) a_ : Optional[int] = UNetaDConditionModel( block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=3_2 , attention_head_dim=4 , ) a_ : int = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=SCREAMING_SNAKE_CASE__ , set_alpha_to_one=SCREAMING_SNAKE_CASE__ , ) torch.manual_seed(0 ) a_ : int = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) a_ : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , ) a_ : Dict = CLIPTextModel(SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) a_ : Union[str, Any] = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, } return components def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any]=0 ) -> List[str]: if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ): a_ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) else: a_ : Optional[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) a_ : int = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'pt', } return inputs def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: a_ : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator a_ : Dict = self.get_dummy_components() a_ : str = TextToVideoSDPipeline(**SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = sd_pipe.to(SCREAMING_SNAKE_CASE__ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Tuple = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) a_ : Dict = 'np' a_ : Dict = sd_pipe(**SCREAMING_SNAKE_CASE__ ).frames a_ : int = 
frames[0][-3:, -3:, -1] assert frames[0].shape == (6_4, 6_4, 3) a_ : Union[str, Any] = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: self._test_attention_slicing_forward_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE__ , expected_max_diff=3E-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def SCREAMING_SNAKE_CASE ( self : Any ) -> str: self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE__ , expected_max_diff=1E-2 ) @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: pass @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]: pass @unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' ) def SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]: pass def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: return super().test_progress_bar() @slow @skip_mps class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: a_ : str = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy' ) a_ : Any = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' ) a_ : Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) a_ : Optional[Any] = pipe.to('cuda' ) a_ : Any = 'Spiderman is surfing' a_ : List[Any] = torch.Generator(device='cpu' ).manual_seed(0 ) a_ : Optional[Any] = pipe(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2_5 , output_type='pt' ).frames a_ : str = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5E-2 def SCREAMING_SNAKE_CASE ( self : Any ) -> Any: a_ : Dict = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy' ) a_ : Tuple = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' ) a_ : Tuple = pipe.to('cuda' ) a_ : Any = 'Spiderman is surfing' a_ : List[str] = torch.Generator(device='cpu' ).manual_seed(0 ) a_ : List[Any] = pipe(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type='pt' ).frames a_ : List[str] = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5E-2
32
1
import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. UpperCAmelCase_ : Dict = {'LayoutLMv2Config', 'LayoutLMv3Config'} @is_pipeline_test class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): snake_case__ : List[str] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING snake_case__ : Optional[Any] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: snake_case__ : str = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: snake_case__ : List[Any] = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: a_ : List[Any] = pipeline( task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' ) a_ : int = text_classifier('This is great !' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}] ) a_ : Tuple = text_classifier('This is great !' , top_k=2 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}] ) a_ : List[str] = text_classifier(['This is great !', 'This is bad'] , top_k=2 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [ [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}], [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}], ] , ) a_ : Tuple = text_classifier('This is great !' , top_k=1 ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}] ) # Legacy behavior a_ : Union[str, Any] = text_classifier('This is great !' , return_all_scores=SCREAMING_SNAKE_CASE__ ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}] ) a_ : List[str] = text_classifier('This is great !' , return_all_scores=SCREAMING_SNAKE_CASE__ ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}]] ) a_ : int = text_classifier(['This is great !', 'Something else'] , return_all_scores=SCREAMING_SNAKE_CASE__ ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [ [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}], [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}], ] , ) a_ : str = text_classifier(['This is great !', 'Something else'] , return_all_scores=SCREAMING_SNAKE_CASE__ ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [ {'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_0', 'score': 0.504}, ] , ) @require_torch def SCREAMING_SNAKE_CASE ( self : int ) -> Dict: import torch a_ : List[Any] = pipeline( task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' , device=torch.device('cpu' ) , ) a_ : Any = text_classifier('This is great !' 
) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}] ) @require_tf def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: a_ : List[str] = pipeline( task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='tf' ) a_ : Optional[int] = text_classifier('This is great !' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}] ) @slow @require_torch def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: a_ : List[str] = pipeline('text-classification' ) a_ : Dict = text_classifier('This is great !' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'POSITIVE', 'score': 1.0}] ) a_ : Union[str, Any] = text_classifier('This is bad !' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'NEGATIVE', 'score': 1.0}] ) a_ : Tuple = text_classifier('Birds are a type of animal' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'POSITIVE', 'score': 0.988}] ) @slow @require_tf def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]: a_ : Dict = pipeline('text-classification' , framework='tf' ) a_ : Optional[Any] = text_classifier('This is great !' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'POSITIVE', 'score': 1.0}] ) a_ : int = text_classifier('This is bad !' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'NEGATIVE', 'score': 1.0}] ) a_ : Optional[int] = text_classifier('Birds are a type of animal' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'POSITIVE', 'score': 0.988}] ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any: a_ : Optional[Any] = TextClassificationPipeline(model=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ ) return text_classifier, ["HuggingFace is in", "This is another test"] def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Union[str, Any]: a_ : List[str] = text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 a_ : Union[str, Any] = 'HuggingFace is in' a_ : int = text_classifier(SCREAMING_SNAKE_CASE__ ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}] ) self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() ) a_ : Union[str, Any] = ['HuggingFace is in ', 'Paris is in France'] a_ : int = text_classifier(SCREAMING_SNAKE_CASE__ ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}, {'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}] , ) self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() ) self.assertTrue(outputs[1]['label'] in model.config.idalabel.values() ) # Forcing to get all results with `top_k=None` # This is NOT the legacy format a_ : List[Any] = text_classifier(SCREAMING_SNAKE_CASE__ , top_k=SCREAMING_SNAKE_CASE__ ) a_ : Dict = len(model.config.idalabel.values() ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [[{'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}] * N, [{'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}] * N] , ) a_ : int = {'text': 
'HuggingFace is in ', 'text_pair': 'Paris is in France'} a_ : Optional[int] = text_classifier(SCREAMING_SNAKE_CASE__ ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , {'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )} , ) self.assertTrue(outputs['label'] in model.config.idalabel.values() ) # This might be used a text pair, but tokenizer + pipe interaction # makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this usage instead as it was outputting wrong outputs. a_ : Any = [['HuggingFace is in ', 'Paris is in France']] with self.assertRaises(SCREAMING_SNAKE_CASE__ ): text_classifier(SCREAMING_SNAKE_CASE__ ) # This used to be valid for doing text pairs # We're keeping it working because of backward compatibility a_ : Tuple = text_classifier([[['HuggingFace is in ', 'Paris is in France']]] ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}] , ) self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
32
import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class SCREAMING_SNAKE_CASE__ ( lowercase__ , unittest.TestCase ): # TODO: is there an appropriate internal test set? snake_case__ : Any = '''ssube/stable-diffusion-x4-upscaler-onnx''' def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : int=0 ) -> Tuple: a_ : Union[str, Any] = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ) a_ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict: a_ : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = self.get_dummy_inputs() a_ : int = pipe(**SCREAMING_SNAKE_CASE__ ).images a_ : Tuple = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 5_1_2, 5_1_2, 3) a_ : List[Any] = np.array( [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: a_ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) a_ : int = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : List[str] = self.get_dummy_inputs() a_ : List[str] = pipe(**SCREAMING_SNAKE_CASE__ ).images a_ : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) a_ : str = np.array( [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: a_ : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) a_ : Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = self.get_dummy_inputs() a_ : Dict = pipe(**SCREAMING_SNAKE_CASE__ ).images a_ : str = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) a_ : Optional[Any] = np.array( [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: a_ : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) a_ : int = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : 
Optional[Any] = self.get_dummy_inputs() a_ : Dict = pipe(**SCREAMING_SNAKE_CASE__ ).images a_ : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) a_ : int = np.array( [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: a_ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) a_ : Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = self.get_dummy_inputs() a_ : List[str] = pipe(**SCREAMING_SNAKE_CASE__ ).images a_ : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) a_ : Union[str, Any] = np.array( [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @property def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: a_ : List[str] = ort.SessionOptions() a_ : int = False return options def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple: a_ : str = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) a_ : int = init_image.resize((1_2_8, 1_2_8) ) # using the PNDM scheduler by default a_ : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained( 'ssube/stable-diffusion-x4-upscaler-onnx' , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Tuple = 'A fantasy landscape, trending on artstation' a_ : str = torch.manual_seed(0 ) a_ : List[str] = pipe( prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , num_inference_steps=1_0 , generator=SCREAMING_SNAKE_CASE__ , output_type='np' , ) a_ : Dict = output.images a_ : Any = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) a_ : str = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]: a_ : Dict = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) a_ : List[str] = init_image.resize((1_2_8, 1_2_8) ) a_ : Dict = LMSDiscreteScheduler.from_pretrained( 'ssube/stable-diffusion-x4-upscaler-onnx' , subfolder='scheduler' ) a_ : Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained( 'ssube/stable-diffusion-x4-upscaler-onnx' , scheduler=SCREAMING_SNAKE_CASE__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Any = 'A fantasy landscape, trending on artstation' a_ : Tuple = torch.manual_seed(0 ) a_ : Optional[Any] = pipe( prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , 
guidance_scale=7.5 , num_inference_steps=2_0 , generator=SCREAMING_SNAKE_CASE__ , output_type='np' , ) a_ : str = output.images a_ : List[Any] = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) a_ : Tuple = np.array( [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
32
1
import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def SCREAMING_SNAKE_CASE_ ( __A : List[str] , __A : Optional[Any] ) -> Dict: """simple docstring""" assert isinstance(__A , __A ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def SCREAMING_SNAKE_CASE_ ( __A : Optional[Any] , __A : List[Any] , __A : Tuple ) -> Optional[Any]: """simple docstring""" a_ : Any = tmp_path / 'cache' a_ : Optional[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): a_ : str = JsonDatasetReader(__A , cache_dir=__A , keep_in_memory=__A ).read() _check_json_dataset(__A , __A ) @pytest.mark.parametrize( 'features' , [ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] , ) def SCREAMING_SNAKE_CASE_ ( __A : Union[str, Any] , __A : Tuple , __A : Optional[int] ) -> Dict: """simple docstring""" a_ : Any = tmp_path / 'cache' a_ : Optional[int] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} a_ : int = features.copy() if features else default_expected_features a_ : str = ( Features({feature: Value(__A ) for feature, dtype in features.items()} ) if features is not None else None ) a_ : Any = JsonDatasetReader(__A , features=__A , cache_dir=__A ).read() _check_json_dataset(__A , __A ) @pytest.mark.parametrize( 'features' , [ None, {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}, ] , ) def SCREAMING_SNAKE_CASE_ ( __A : Any , __A : List[Any] , __A : str ) -> Any: """simple docstring""" a_ : Tuple = tmp_path / 'cache' a_ : str = {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'} a_ : Tuple = features.copy() if features else default_expected_features a_ : str = ( Features({feature: Value(__A ) for feature, dtype in features.items()} ) if features is not None else None ) a_ : List[str] = JsonDatasetReader(__A , features=__A , cache_dir=__A ).read() assert isinstance(__A , __A ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def SCREAMING_SNAKE_CASE_ ( __A : str , __A : List[str] ) -> Any: """simple docstring""" a_ : Dict = {'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'} a_ : Optional[Any] = features.copy() a_ : List[str] = ( Features({feature: Value(__A ) for feature, dtype in features.items()} ) if features is not None else None ) a_ : List[Any] = tmp_path / 'cache' a_ : Any = JsonDatasetReader(__A , features=__A , cache_dir=__A ).read() assert isinstance(__A , __A ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('split' , [None, 
NamedSplit('train' ), 'train', 'test'] ) def SCREAMING_SNAKE_CASE_ ( __A : Tuple , __A : Optional[int] , __A : Any ) -> List[Any]: """simple docstring""" a_ : List[Any] = tmp_path / 'cache' a_ : str = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} a_ : str = JsonDatasetReader(__A , cache_dir=__A , split=__A ).read() _check_json_dataset(__A , __A ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('path_type' , [str, list] ) def SCREAMING_SNAKE_CASE_ ( __A : Any , __A : Optional[Any] , __A : Optional[int] ) -> List[Any]: """simple docstring""" if issubclass(__A , __A ): a_ : Dict = jsonl_path elif issubclass(__A , __A ): a_ : Tuple = [jsonl_path] a_ : List[str] = tmp_path / 'cache' a_ : List[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} a_ : int = JsonDatasetReader(__A , cache_dir=__A ).read() _check_json_dataset(__A , __A ) def SCREAMING_SNAKE_CASE_ ( __A : int , __A : Tuple , __A : Tuple=("train",) ) -> int: """simple docstring""" assert isinstance(__A , __A ) for split in splits: a_ : str = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def SCREAMING_SNAKE_CASE_ ( __A : Dict , __A : Dict , __A : Any ) -> Union[str, Any]: """simple docstring""" a_ : Any = tmp_path / 'cache' a_ : str = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): a_ : List[Any] = JsonDatasetReader({'train': jsonl_path} , cache_dir=__A , keep_in_memory=__A ).read() _check_json_datasetdict(__A , __A ) @pytest.mark.parametrize( 'features' , [ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] , ) def SCREAMING_SNAKE_CASE_ ( __A : str , __A : Any , __A : List[Any] ) -> List[str]: """simple docstring""" a_ : List[Any] = tmp_path / 'cache' a_ : Optional[int] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} a_ : Union[str, Any] = features.copy() if features else default_expected_features a_ : Tuple = ( Features({feature: Value(__A ) for feature, dtype in features.items()} ) if features is not None else None ) a_ : Dict = JsonDatasetReader({'train': jsonl_path} , features=__A , cache_dir=__A ).read() _check_json_datasetdict(__A , __A ) @pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] ) def SCREAMING_SNAKE_CASE_ ( __A : Dict , __A : str , __A : List[str] ) -> Any: """simple docstring""" if split: a_ : Union[str, Any] = {split: jsonl_path} else: a_ : str = 'train' a_ : List[Any] = {'train': jsonl_path, 'test': jsonl_path} a_ : Optional[Any] = tmp_path / 'cache' a_ : Optional[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} a_ : Optional[int] = JsonDatasetReader(__A , cache_dir=__A ).read() _check_json_datasetdict(__A , __A , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def SCREAMING_SNAKE_CASE_ ( __A : Union[str, Any] ) -> Optional[int]: """simple docstring""" return json.load(__A ) def SCREAMING_SNAKE_CASE_ ( __A : Tuple ) -> Union[str, Any]: """simple docstring""" return [json.loads(__A ) for line in 
buffer] class SCREAMING_SNAKE_CASE__ : @pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] ) def SCREAMING_SNAKE_CASE ( self : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple ) -> Dict: with io.BytesIO() as buffer: JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ ).write() buffer.seek(0 ) a_ : Union[str, Any] = load_json_function(SCREAMING_SNAKE_CASE__ ) assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert isinstance(exported_content[0] , SCREAMING_SNAKE_CASE__ ) assert len(SCREAMING_SNAKE_CASE__ ) == 1_0 @pytest.mark.parametrize( 'orient, container, keys, len_at' , [ ('records', list, {'tokens', 'labels', 'answers', 'id'}, None), ('split', dict, {'columns', 'data'}, 'data'), ('index', dict, set('0123456789' ), None), ('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'), ('values', list, None, None), ('table', dict, {'schema', 'data'}, 'data'), ] , ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[Any]: with io.BytesIO() as buffer: JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ , orient=SCREAMING_SNAKE_CASE__ ).write() buffer.seek(0 ) a_ : List[Any] = load_json(SCREAMING_SNAKE_CASE__ ) assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(SCREAMING_SNAKE_CASE__ , 'keys' ) and not hasattr(exported_content[0] , 'keys' ) if len_at: assert len(exported_content[len_at] ) == 1_0 else: assert len(SCREAMING_SNAKE_CASE__ ) == 1_0 @pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] ) def SCREAMING_SNAKE_CASE ( self : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any ) -> Optional[int]: with io.BytesIO() as buffer: JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ , num_proc=2 ).write() buffer.seek(0 ) a_ : Optional[int] = load_json_function(SCREAMING_SNAKE_CASE__ ) assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert isinstance(exported_content[0] , SCREAMING_SNAKE_CASE__ ) assert len(SCREAMING_SNAKE_CASE__ ) == 1_0 @pytest.mark.parametrize( 'orient, container, keys, len_at' , [ ('records', list, {'tokens', 'labels', 'answers', 'id'}, None), ('split', dict, {'columns', 'data'}, 'data'), ('index', dict, set('0123456789' ), None), ('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'), ('values', list, None, None), ('table', dict, {'schema', 'data'}, 'data'), ] , ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[str]: with io.BytesIO() as buffer: JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , lines=SCREAMING_SNAKE_CASE__ , orient=SCREAMING_SNAKE_CASE__ , num_proc=2 ).write() buffer.seek(0 ) a_ : str = load_json(SCREAMING_SNAKE_CASE__ ) assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if keys: if 
container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(SCREAMING_SNAKE_CASE__ , 'keys' ) and not hasattr(exported_content[0] , 'keys' ) if len_at: assert len(exported_content[len_at] ) == 1_0 else: assert len(SCREAMING_SNAKE_CASE__ ) == 1_0 def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Dict: with pytest.raises(SCREAMING_SNAKE_CASE__ ): with io.BytesIO() as buffer: JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , num_proc=0 ) @pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] ) def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[int]: a_ : Optional[int] = tmp_path_factory.mktemp('data' ) / F"""test.json.{extension}""" a_ : int = str(shared_datadir / F"""test_file.json.{extension}""" ) JsonDatasetWriter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , compression=SCREAMING_SNAKE_CASE__ ).write() with fsspec.open(SCREAMING_SNAKE_CASE__ , 'rb' , compression='infer' ) as f: a_ : Optional[Any] = f.read() with fsspec.open(SCREAMING_SNAKE_CASE__ , 'rb' , compression='infer' ) as f: a_ : List[str] = f.read() assert exported_content == original_content
32
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def SCREAMING_SNAKE_CASE_ ( __A : List[str] ) -> str:
    """simple docstring"""
    a_ : Tuple = []
    for line in lines:
        a_ : Any = re.sub(R'#.*' , '' , __A )  # remove comments
        if line:
            filtered_lines.append(__A )
    a_ : Tuple = '\n'.join(__A )
    # Make a hash from all this code
    a_ : Tuple = full_str.encode('utf-8' )
    return shaaaa(__A ).hexdigest()


# get importable module names and hash for caching
UpperCAmelCase_ : List[Any] = {
    'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
UpperCAmelCase_ : Dict = {
    '.csv': ('csv', {}),
    '.tsv': ('csv', {'sep': '\t'}),
    '.json': ('json', {}),
    '.jsonl': ('json', {}),
    '.parquet': ('parquet', {}),
    '.arrow': ('arrow', {}),
    '.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})

UpperCAmelCase_ : Optional[int] = {'imagefolder', 'audiofolder'}

# Used to filter data files based on extensions given a module name
UpperCAmelCase_ : Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
32
1
import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ...models import TaFilmDecoder from ...schedulers import DDPMScheduler from ...utils import is_onnx_available, logging, randn_tensor if is_onnx_available(): from ..onnx_utils import OnnxRuntimeModel from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name UpperCAmelCase_ : Union[str, Any] = 256 class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Union[str, Any] = ['''melgan'''] def __init__( self : Any , SCREAMING_SNAKE_CASE__ : SpectrogramNotesEncoder , SCREAMING_SNAKE_CASE__ : SpectrogramContEncoder , SCREAMING_SNAKE_CASE__ : TaFilmDecoder , SCREAMING_SNAKE_CASE__ : DDPMScheduler , SCREAMING_SNAKE_CASE__ : OnnxRuntimeModel if is_onnx_available() else Any , ) -> None: super().__init__() # From MELGAN a_ : Optional[Any] = math.log(1E-5 ) # Matches MelGAN training. a_ : Optional[Any] = 4.0 # Largest value for most examples a_ : Any = 1_2_8 self.register_modules( notes_encoder=SCREAMING_SNAKE_CASE__ , continuous_encoder=SCREAMING_SNAKE_CASE__ , decoder=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , melgan=SCREAMING_SNAKE_CASE__ , ) def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict=(-1.0, 1.0) , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False ) -> List[str]: a_ , a_ : str = output_range if clip: a_ : Dict = torch.clip(SCREAMING_SNAKE_CASE__ , self.min_value , self.max_value ) # Scale to [0, 1]. a_ : str = (features - self.min_value) / (self.max_value - self.min_value) # Scale to [min_out, max_out]. return zero_one * (max_out - min_out) + min_out def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : str=(-1.0, 1.0) , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False ) -> str: a_ , a_ : str = input_range a_ : Tuple = torch.clip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if clip else outputs # Scale to [0, 1]. a_ : str = (outputs - min_out) / (max_out - min_out) # Scale to [self.min_value, self.max_value]. 
return zero_one * (self.max_value - self.min_value) + self.min_value def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any ) -> Union[str, Any]: a_ : Optional[Any] = input_tokens > 0 a_ , a_ : Tuple = self.notes_encoder( encoder_input_tokens=SCREAMING_SNAKE_CASE__ , encoder_inputs_mask=SCREAMING_SNAKE_CASE__ ) a_ , a_ : Union[str, Any] = self.continuous_encoder( encoder_inputs=SCREAMING_SNAKE_CASE__ , encoder_inputs_mask=SCREAMING_SNAKE_CASE__ ) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def SCREAMING_SNAKE_CASE ( self : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> int: a_ : Union[str, Any] = noise_time if not torch.is_tensor(SCREAMING_SNAKE_CASE__ ): a_ : int = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device ) elif torch.is_tensor(SCREAMING_SNAKE_CASE__ ) and len(timesteps.shape ) == 0: a_ : Dict = timesteps[None].to(input_tokens.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML a_ : Optional[int] = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device ) a_ : Optional[Any] = self.decoder( encodings_and_masks=SCREAMING_SNAKE_CASE__ , decoder_input_tokens=SCREAMING_SNAKE_CASE__ , decoder_noise_time=SCREAMING_SNAKE_CASE__ ) return logits @torch.no_grad() def __call__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[List[int]] , SCREAMING_SNAKE_CASE__ : Optional[torch.Generator] = None , SCREAMING_SNAKE_CASE__ : int = 1_0_0 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : str = "numpy" , SCREAMING_SNAKE_CASE__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , SCREAMING_SNAKE_CASE__ : int = 1 , ) -> Union[AudioPipelineOutput, Tuple]: if (callback_steps is None) or ( callback_steps is not None and (not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or callback_steps <= 0) ): raise ValueError( F"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" F""" {type(SCREAMING_SNAKE_CASE__ )}.""" ) a_ : Tuple = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa ) a_ : Any = np.zeros([1, 0, self.n_dims] , np.floataa ) a_ : str = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=SCREAMING_SNAKE_CASE__ , device=self.device ) for i, encoder_input_tokens in enumerate(SCREAMING_SNAKE_CASE__ ): if i == 0: a_ : List[Any] = torch.from_numpy(pred_mel[:1].copy() ).to( device=self.device , dtype=self.decoder.dtype ) # The first chunk has no previous context. a_ : List[str] = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=SCREAMING_SNAKE_CASE__ , device=self.device ) else: # The full song pipeline does not feed in a context feature, so the mask # will be all 0s after the feature converter. Because we know we're # feeding in a full context chunk from the previous prediction, set it # to all 1s. 
a_ : Optional[int] = ones a_ : Union[str, Any] = self.scale_features( SCREAMING_SNAKE_CASE__ , output_range=[-1.0, 1.0] , clip=SCREAMING_SNAKE_CASE__ ) a_ : Any = self.encode( input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=SCREAMING_SNAKE_CASE__ , continuous_mask=SCREAMING_SNAKE_CASE__ , ) # Sample encoder_continuous_inputs shaped gaussian noise to begin loop a_ : str = randn_tensor( shape=encoder_continuous_inputs.shape , generator=SCREAMING_SNAKE_CASE__ , device=self.device , dtype=self.decoder.dtype , ) # set step values self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ ) # Denoising diffusion loop for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): a_ : str = self.decode( encodings_and_masks=SCREAMING_SNAKE_CASE__ , input_tokens=SCREAMING_SNAKE_CASE__ , noise_time=t / self.scheduler.config.num_train_timesteps , ) # Compute previous output: x_t -> x_t-1 a_ : List[str] = self.scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ ).prev_sample a_ : List[Any] = self.scale_to_features(SCREAMING_SNAKE_CASE__ , input_range=[-1.0, 1.0] ) a_ : Optional[Any] = mel[:1] a_ : List[str] = mel.cpu().float().numpy() a_ : List[str] = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 ) # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) logger.info('Generated segment' , SCREAMING_SNAKE_CASE__ ) if output_type == "numpy" and not is_onnx_available(): raise ValueError( 'Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.' ) elif output_type == "numpy" and self.melgan is None: raise ValueError( 'Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.' ) if output_type == "numpy": a_ : List[Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa ) ) else: a_ : Any = full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=SCREAMING_SNAKE_CASE__ )
32
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase_ : str = logging.get_logger(__name__) UpperCAmelCase_ : List[str] = { 'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json', 'YituTech/conv-bert-medium-small': ( 'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json' ), 'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json', # See all ConvBERT models at https://huggingface.co/models?filter=convbert } class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Optional[int] = '''convbert''' def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int]=3_0_5_2_2 , SCREAMING_SNAKE_CASE__ : Dict=7_6_8 , SCREAMING_SNAKE_CASE__ : Optional[int]=1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE__ : str=3_0_7_2 , SCREAMING_SNAKE_CASE__ : Dict="gelu" , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=5_1_2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=2 , SCREAMING_SNAKE_CASE__ : List[Any]=0.02 , SCREAMING_SNAKE_CASE__ : Any=1E-12 , SCREAMING_SNAKE_CASE__ : int=1 , SCREAMING_SNAKE_CASE__ : int=0 , SCREAMING_SNAKE_CASE__ : Optional[int]=2 , SCREAMING_SNAKE_CASE__ : Optional[int]=7_6_8 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : List[Any]=9 , SCREAMING_SNAKE_CASE__ : List[Any]=1 , SCREAMING_SNAKE_CASE__ : Dict=None , **SCREAMING_SNAKE_CASE__ : List[Any] , ) -> Any: super().__init__( pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) a_ : Tuple = vocab_size a_ : List[str] = hidden_size a_ : List[str] = num_hidden_layers a_ : Dict = num_attention_heads a_ : Optional[int] = intermediate_size a_ : int = hidden_act a_ : Dict = hidden_dropout_prob a_ : int = attention_probs_dropout_prob a_ : str = max_position_embeddings a_ : List[str] = type_vocab_size a_ : List[str] = initializer_range a_ : Tuple = layer_norm_eps a_ : Optional[int] = embedding_size a_ : List[Any] = head_ratio a_ : List[Any] = conv_kernel_size a_ : Tuple = num_groups a_ : Tuple = classifier_dropout class SCREAMING_SNAKE_CASE__ ( lowercase__ ): @property def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": a_ : Tuple = {0: 'batch', 1: 'choice', 2: 'sequence'} else: a_ : List[str] = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ] )
32
1
import itertools
import string
from collections.abc import Generator, Iterable


def SCREAMING_SNAKE_CASE_ ( __A : Iterable[str] , __A : int ) -> Generator[tuple[str, ...], None, None]:
    """simple docstring"""
    a_ : List[str] = iter(__A )
    while True:
        a_ : Optional[int] = tuple(itertools.islice(__A , __A ) )
        if not chunk:
            return
        yield chunk


def SCREAMING_SNAKE_CASE_ ( __A : str ) -> str:
    """simple docstring"""
    a_ : Dict = ''.join([c.upper() for c in dirty if c in string.ascii_letters] )
    a_ : Union[str, Any] = ''
    if len(__A ) < 2:
        return dirty
    for i in range(len(__A ) - 1 ):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(__A ) & 1:
        clean += "X"
    return clean


def SCREAMING_SNAKE_CASE_ ( __A : str ) -> list[str]:
    """simple docstring"""
    a_ : Optional[Any] = 'ABCDEFGHIKLMNOPQRSTUVWXYZ'
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    a_ : Optional[Any] = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(__A )
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(__A )
    return table


def SCREAMING_SNAKE_CASE_ ( __A : str , __A : str ) -> str:
    """simple docstring"""
    a_ : Optional[int] = generate_table(__A )
    a_ : Tuple = prepare_input(__A )
    a_ : List[Any] = ''
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, chara in chunker(__A , 2 ):
        a_ , a_ : List[Any] = divmod(table.index(__A ) , 5 )
        a_ , a_ : Dict = divmod(table.index(__A ) , 5 )
        if rowa == rowa:
            ciphertext += table[rowa * 5 + (cola + 1) % 5]
            ciphertext += table[rowa * 5 + (cola + 1) % 5]
        elif cola == cola:
            ciphertext += table[((rowa + 1) % 5) * 5 + cola]
            ciphertext += table[((rowa + 1) % 5) * 5 + cola]
        else:  # rectangle
            ciphertext += table[rowa * 5 + cola]
            ciphertext += table[rowa * 5 + cola]
    return ciphertext


def SCREAMING_SNAKE_CASE_ ( __A : str , __A : str ) -> str:
    """simple docstring"""
    a_ : Union[str, Any] = generate_table(__A )
    a_ : Union[str, Any] = ''
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, chara in chunker(__A , 2 ):
        a_ , a_ : Optional[Any] = divmod(table.index(__A ) , 5 )
        a_ , a_ : Dict = divmod(table.index(__A ) , 5 )
        if rowa == rowa:
            plaintext += table[rowa * 5 + (cola - 1) % 5]
            plaintext += table[rowa * 5 + (cola - 1) % 5]
        elif cola == cola:
            plaintext += table[((rowa - 1) % 5) * 5 + cola]
            plaintext += table[((rowa - 1) % 5) * 5 + cola]
        else:  # rectangle
            plaintext += table[rowa * 5 + cola]
            plaintext += table[rowa * 5 + cola]
    return plaintext
32
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE__ : def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str=1_3 , SCREAMING_SNAKE_CASE__ : Optional[int]=7 , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : str=9_9 , SCREAMING_SNAKE_CASE__ : str=2_4 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=6 , SCREAMING_SNAKE_CASE__ : Optional[int]=3_7 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=5_1_2 , SCREAMING_SNAKE_CASE__ : List[str]=1_6 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : int=0.02 , SCREAMING_SNAKE_CASE__ : Optional[Any]=3 , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Tuple=1_0_0_0 , ) -> str: a_ : Optional[Any] = parent a_ : List[str] = batch_size a_ : List[str] = seq_length a_ : str = is_training a_ : str = use_input_mask a_ : int = use_token_type_ids a_ : List[str] = use_labels a_ : Optional[int] = vocab_size a_ : Any = hidden_size a_ : int = num_hidden_layers a_ : List[str] = num_attention_heads a_ : str = intermediate_size a_ : Union[str, Any] = hidden_act a_ : List[str] = hidden_dropout_prob a_ : int = attention_probs_dropout_prob a_ : int = max_position_embeddings a_ : Tuple = type_vocab_size a_ : Optional[Any] = type_sequence_label_size a_ : Tuple = initializer_range a_ : Dict = num_labels a_ : str = scope a_ : Optional[int] = range_bbox def SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: a_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a_ : Any = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: a_ : int = bbox[i, j, 3] a_ : str = bbox[i, j, 1] a_ : List[str] = t if bbox[i, j, 2] < bbox[i, j, 0]: a_ : Tuple = bbox[i, j, 2] a_ : List[str] = bbox[i, j, 0] a_ : Union[str, Any] = t a_ : List[Any] = None if self.use_input_mask: a_ : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) a_ : List[Any] = None if self.use_token_type_ids: a_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a_ : int = None a_ : Tuple = None if self.use_labels: a_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a_ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a_ : Optional[int] = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def SCREAMING_SNAKE_CASE ( self : Dict ) -> int: return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> str: a_ : Any = LiltModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : Any = model(SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = model(SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = model(SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> int: a_ : Any = self.num_labels a_ : str = LiltForTokenClassification(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : str = model( SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> str: a_ : Union[str, Any] = LiltForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : List[str] = model( SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , start_positions=SCREAMING_SNAKE_CASE__ , end_positions=SCREAMING_SNAKE_CASE__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: a_ : int = self.prepare_config_and_inputs() ( ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ) : List[Any] = config_and_inputs a_ : Optional[int] = { 'input_ids': input_ids, 'bbox': bbox, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ): snake_case__ : Union[str, Any] = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) snake_case__ : str = ( 
{ '''feature-extraction''': LiltModel, '''question-answering''': LiltForQuestionAnswering, '''text-classification''': LiltForSequenceClassification, '''token-classification''': LiltForTokenClassification, '''zero-shot''': LiltForSequenceClassification, } if is_torch_available() else {} ) snake_case__ : List[str] = False snake_case__ : str = False def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int ) -> int: return True def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: a_ : str = LiltModelTester(self ) a_ : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=3_7 ) def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]: self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: a_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: a_ : Tuple = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: a_ : List[str] = type self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: a_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]: a_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE__ ) @slow def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a_ : List[Any] = LiltModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) @require_torch @slow class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]: a_ : List[str] = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(SCREAMING_SNAKE_CASE__ ) a_ : str = torch.tensor([[1, 2]] , device=SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=SCREAMING_SNAKE_CASE__ ) # forward pass with torch.no_grad(): a_ : str = model(input_ids=SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = torch.Size([1, 2, 7_6_8] ) a_ : int = torch.tensor( [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=SCREAMING_SNAKE_CASE__ , ) self.assertTrue(outputs.last_hidden_state.shape , SCREAMING_SNAKE_CASE__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) )
32
1
from __future__ import annotations

from typing import Generic, TypeVar

UpperCAmelCase_ : List[Any] = TypeVar('T')


class SCREAMING_SNAKE_CASE__ ( Generic[T] ):
    def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : T ) -> None:
        a_ : Optional[int] = data
        a_ : int = self
        a_ : str = 0


class SCREAMING_SNAKE_CASE__ ( Generic[T] ):
    def __init__( self : Optional[Any] ) -> None:
        # map from node name to the node object
        a_ : dict[T, DisjointSetTreeNode[T]] = {}

    def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : T ) -> None:
        # create a new set with x as its member
        a_ : Dict = DisjointSetTreeNode(SCREAMING_SNAKE_CASE__ )

    def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : T ) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        a_ : List[Any] = self.map[data]
        if elem_ref != elem_ref.parent:
            a_ : Union[str, Any] = self.find_set(elem_ref.parent.data )
        return elem_ref.parent

    def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : DisjointSetTreeNode[T] , SCREAMING_SNAKE_CASE__ : DisjointSetTreeNode[T] ) -> None:
        # helper function for union operation
        if nodea.rank > nodea.rank:
            a_ : List[str] = nodea
        else:
            a_ : Optional[Any] = nodea
            if nodea.rank == nodea.rank:
                nodea.rank += 1

    def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : T ) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(SCREAMING_SNAKE_CASE__ ) , self.find_set(SCREAMING_SNAKE_CASE__ ) )


class SCREAMING_SNAKE_CASE__ ( Generic[T] ):
    def __init__( self : Optional[int] ) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        a_ : dict[T, dict[T, int]] = {}

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : T ) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            a_ : Optional[Any] = {}

    def SCREAMING_SNAKE_CASE ( self : str , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ) -> None:
        # add an edge with the given weight
        self.add_node(SCREAMING_SNAKE_CASE__ )
        self.add_node(SCREAMING_SNAKE_CASE__ )
        a_ : List[str] = weight
        a_ : List[Any] = weight

    def SCREAMING_SNAKE_CASE ( self : List[str] ) -> GraphUndirectedWeighted[T]:
        a_ : Tuple = []
        a_ : Dict = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start) )
                    edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda SCREAMING_SNAKE_CASE__ : x[2] )

        # creating the disjoint set
        a_ : List[Any] = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(SCREAMING_SNAKE_CASE__ )

        # MST generation
        a_ : List[str] = 0
        a_ : Union[str, Any] = 0
        a_ : Tuple = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections ) - 1:
            a_ , a_ , a_ : Optional[int] = edges[index]
            index += 1
            a_ : Dict = disjoint_set.find_set(SCREAMING_SNAKE_CASE__ )
            a_ : str = disjoint_set.find_set(SCREAMING_SNAKE_CASE__ )
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
                disjoint_set.union(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        return graph
32
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class SCREAMING_SNAKE_CASE__ : def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple=1_3 , SCREAMING_SNAKE_CASE__ : str=7 , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=9_9 , SCREAMING_SNAKE_CASE__ : Optional[Any]=3_2 , SCREAMING_SNAKE_CASE__ : List[str]=5 , SCREAMING_SNAKE_CASE__ : List[Any]=4 , SCREAMING_SNAKE_CASE__ : Tuple=3_7 , SCREAMING_SNAKE_CASE__ : Any="gelu" , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : int=5_1_2 , SCREAMING_SNAKE_CASE__ : int=1_6 , SCREAMING_SNAKE_CASE__ : Optional[int]=2 , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE__ : Optional[Any]=4 , SCREAMING_SNAKE_CASE__ : Optional[int]=None , ) -> Any: a_ : Tuple = parent a_ : int = batch_size a_ : Tuple = seq_length a_ : List[Any] = is_training a_ : List[str] = use_token_type_ids a_ : Dict = use_labels a_ : Any = vocab_size a_ : List[str] = hidden_size a_ : Tuple = num_hidden_layers a_ : List[Any] = num_attention_heads a_ : Dict = intermediate_size a_ : Any = hidden_act a_ : List[str] = hidden_dropout_prob a_ : Tuple = attention_probs_dropout_prob a_ : Optional[Any] = max_position_embeddings a_ : List[Any] = type_vocab_size a_ : int = type_sequence_label_size a_ : List[Any] = initializer_range a_ : List[str] = num_labels a_ : Union[str, Any] = num_choices a_ : str = scope a_ : Tuple = self.vocab_size - 1 def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: a_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a_ : Any = None if self.use_token_type_ids: a_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a_ : List[Any] = None a_ : Union[str, Any] = None a_ : List[Any] = None if self.use_labels: a_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a_ : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) a_ : Union[str, Any] = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) a_ : List[str] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , *SCREAMING_SNAKE_CASE__ : Tuple ) -> Union[str, Any]: a_ : Dict = OpenAIGPTModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : str = 
model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , head_mask=SCREAMING_SNAKE_CASE__ ) a_ : Dict = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ ) a_ : Dict = model(SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Any: a_ : str = OpenAIGPTLMHeadModel(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : Optional[int] = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict: a_ : int = OpenAIGPTDoubleHeadsModel(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : str = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : str ) -> List[str]: a_ : Any = self.num_labels a_ : Dict = OpenAIGPTForSequenceClassification(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a_ : Any = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: a_ : Optional[Any] = self.prepare_config_and_inputs() ( ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ) : Optional[Any] = config_and_inputs a_ : Optional[int] = { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask, } return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ): snake_case__ : Tuple = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) snake_case__ : List[str] = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly snake_case__ : Dict = ( { '''feature-extraction''': OpenAIGPTModel, '''text-classification''': OpenAIGPTForSequenceClassification, '''text-generation''': OpenAIGPTLMHeadModel, '''zero-shot''': OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] ) -> Dict: if 
pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any=False ) -> List[str]: a_ : str = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": a_ : Optional[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ , ) a_ : str = inputs_dict['labels'] a_ : Optional[int] = inputs_dict['labels'] a_ : Optional[int] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ , ) a_ : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ ) return inputs_dict def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: a_ : str = OpenAIGPTModelTester(self ) a_ : int = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , n_embd=3_7 ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: a_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: a_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]: a_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: a_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*SCREAMING_SNAKE_CASE__ ) @slow def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str: for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a_ : str = OpenAIGPTModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE ( self : Dict ) -> int: a_ : Dict = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' ) model.to(SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ ) # the president is a_ : Tuple = [ 4_8_1, 4_7_3_5, 5_4_4, 2_4_6, 9_6_3, 8_7_0, 7_6_2, 2_3_9, 2_4_4, 4_0_4_7_7, 2_4_4, 2_4_9, 7_1_9, 8_8_1, 4_8_7, 5_4_4, 2_4_0, 2_4_4, 6_0_3, 4_8_1, ] # the president is a very good man. " \n " i\'m sure he is, " said the a_ : Dict = model.generate(SCREAMING_SNAKE_CASE__ , do_sample=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(output_ids[0].tolist() , SCREAMING_SNAKE_CASE__ )
32
1
from arguments import InitializationArguments

from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser

# Configuration
UpperCAmelCase_ : str = HfArgumentParser(InitializationArguments)
UpperCAmelCase_ : str = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
UpperCAmelCase_ : Optional[Any] = {
    'vocab_size': len(tokenizer),
    'scale_attn_by_inverse_layer_idx': True,
    'reorder_and_upcast_attn': True,
}

# Load model config (GPT-2 large in this case)
UpperCAmelCase_ : Dict = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
UpperCAmelCase_ : Optional[int] = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
32
import copy from typing import Dict, List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING UpperCAmelCase_ : Optional[int] = { 'facebook/mask2former-swin-small-coco-instance': ( 'https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json' ) # See all Mask2Former models at https://huggingface.co/models?filter=mask2former } UpperCAmelCase_ : List[str] = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Any = '''mask2former''' snake_case__ : Any = ['''swin'''] snake_case__ : str = {'''hidden_size''': '''hidden_dim'''} def __init__( self : Any , SCREAMING_SNAKE_CASE__ : Optional[Dict] = None , SCREAMING_SNAKE_CASE__ : int = 2_5_6 , SCREAMING_SNAKE_CASE__ : int = 2_5_6 , SCREAMING_SNAKE_CASE__ : int = 2_5_6 , SCREAMING_SNAKE_CASE__ : int = 1_0_2_4 , SCREAMING_SNAKE_CASE__ : str = "relu" , SCREAMING_SNAKE_CASE__ : int = 6 , SCREAMING_SNAKE_CASE__ : int = 1_0 , SCREAMING_SNAKE_CASE__ : int = 8 , SCREAMING_SNAKE_CASE__ : float = 0.0 , SCREAMING_SNAKE_CASE__ : int = 2_0_4_8 , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : int = 4 , SCREAMING_SNAKE_CASE__ : int = 2_5_5 , SCREAMING_SNAKE_CASE__ : int = 1_0_0 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 2.0 , SCREAMING_SNAKE_CASE__ : float = 5.0 , SCREAMING_SNAKE_CASE__ : float = 5.0 , SCREAMING_SNAKE_CASE__ : int = 1_2_5_4_4 , SCREAMING_SNAKE_CASE__ : float = 3.0 , SCREAMING_SNAKE_CASE__ : float = 0.75 , SCREAMING_SNAKE_CASE__ : float = 0.02 , SCREAMING_SNAKE_CASE__ : float = 1.0 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : List[int] = [4, 8, 1_6, 3_2] , SCREAMING_SNAKE_CASE__ : bool = None , **SCREAMING_SNAKE_CASE__ : int , ) -> List[Any]: if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' ) a_ : Dict = CONFIG_MAPPING['swin']( image_size=2_2_4 , in_channels=3 , patch_size=4 , embed_dim=9_6 , depths=[2, 2, 1_8, 2] , num_heads=[3, 6, 1_2, 2_4] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=SCREAMING_SNAKE_CASE__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): a_ : Any = backbone_config.pop('model_type' ) a_ : Optional[Any] = CONFIG_MAPPING[backbone_model_type] a_ : List[str] = config_class.from_dict(SCREAMING_SNAKE_CASE__ ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( F"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. 
""" F"""Supported model types: {",".join(self.backbones_supported )}""" ) a_ : Dict = backbone_config a_ : List[str] = feature_size a_ : List[str] = mask_feature_size a_ : int = hidden_dim a_ : Dict = encoder_feedforward_dim a_ : str = activation_function a_ : List[str] = encoder_layers a_ : List[str] = decoder_layers a_ : Dict = num_attention_heads a_ : str = dropout a_ : Tuple = dim_feedforward a_ : List[str] = pre_norm a_ : Optional[int] = enforce_input_projection a_ : Any = common_stride a_ : Optional[int] = ignore_value a_ : int = num_queries a_ : Tuple = no_object_weight a_ : Dict = class_weight a_ : Optional[int] = mask_weight a_ : Optional[int] = dice_weight a_ : str = train_num_points a_ : List[str] = oversample_ratio a_ : List[Any] = importance_sample_ratio a_ : Any = init_std a_ : Union[str, Any] = init_xavier_std a_ : Union[str, Any] = use_auxiliary_loss a_ : Dict = feature_strides a_ : List[str] = output_auxiliary_logits a_ : Dict = decoder_layers super().__init__(**SCREAMING_SNAKE_CASE__ ) @classmethod def SCREAMING_SNAKE_CASE ( cls : str , SCREAMING_SNAKE_CASE__ : PretrainedConfig , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> List[Any]: return cls( backbone_config=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict[str, any]: a_ : Optional[int] = copy.deepcopy(self.__dict__ ) a_ : List[Any] = self.backbone_config.to_dict() a_ : Optional[Any] = self.__class__.model_type return output
32
1
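# Hypothetical usage sketch for the configuration class defined above, which is assumed to be
# `transformers.Mask2FormerConfig` (class and variable names are mangled in this dump):
# instantiating it without arguments builds the default Swin backbone config.
from transformers import Mask2FormerConfig

config = Mask2FormerConfig()                   # logs that the default Swin backbone is used
print(config.backbone_config.model_type)       # "swin"
print(config.num_queries, config.hidden_dim)   # 100 256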
import enum import shutil import sys UpperCAmelCase_ , UpperCAmelCase_ : List[str] = shutil.get_terminal_size() UpperCAmelCase_ : List[Any] = {'UP': 'A', 'DOWN': 'B', 'RIGHT': 'C', 'LEFT': 'D'} class SCREAMING_SNAKE_CASE__ ( enum.Enum ): snake_case__ : Optional[Any] = 0 snake_case__ : Dict = 1 def SCREAMING_SNAKE_CASE_ ( __A : Tuple , __A : Optional[Any]="" ) -> List[str]: """simple docstring""" sys.stdout.write(str(__A ) + end ) sys.stdout.flush() def SCREAMING_SNAKE_CASE_ ( __A : List[str] , __A : int , __A : List[Any]="" ) -> int: """simple docstring""" forceWrite(F"""\u001b[{color}m{content}\u001b[0m""" , __A ) def SCREAMING_SNAKE_CASE_ ( ) -> Dict: """simple docstring""" forceWrite('\r' ) def SCREAMING_SNAKE_CASE_ ( __A : int , __A : str ) -> Optional[int]: """simple docstring""" forceWrite(F"""\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}""" ) def SCREAMING_SNAKE_CASE_ ( ) -> Any: """simple docstring""" forceWrite(' ' * TERMINAL_WIDTH ) reset_cursor() def SCREAMING_SNAKE_CASE_ ( ) -> Dict: """simple docstring""" reset_cursor() forceWrite('-' * TERMINAL_WIDTH )
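# Self-contained sketch of the ANSI mechanics the helpers above wrap (colored output and
# in-place line updates); the names used here are illustrative, not from the file above.
import sys
import time

def write_color(text: str, color: int) -> None:
    sys.stdout.write(f"\u001b[{color}m{text}\u001b[0m")
    sys.stdout.flush()

for pct in range(0, 101, 25):
    sys.stdout.write("\r")                   # return the cursor to the start of the line
    write_color(f"progress: {pct:3d}%", 32)  # 32 = ANSI green
    time.sleep(0.1)
sys.stdout.write("\n")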
32
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ : Dict = logging.get_logger(__name__) UpperCAmelCase_ : Union[str, Any] = { 'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json', } class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : List[str] = '''switch_transformers''' snake_case__ : Optional[int] = ['''past_key_values'''] snake_case__ : Optional[Any] = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[int]=3_2_1_2_8 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=7_6_8 , SCREAMING_SNAKE_CASE__ : Optional[Any]=6_4 , SCREAMING_SNAKE_CASE__ : List[str]=2_0_4_8 , SCREAMING_SNAKE_CASE__ : Dict=6_4 , SCREAMING_SNAKE_CASE__ : List[Any]=1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE__ : str=3 , SCREAMING_SNAKE_CASE__ : Tuple=1_2 , SCREAMING_SNAKE_CASE__ : Tuple=8 , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.01 , SCREAMING_SNAKE_CASE__ : str="float32" , SCREAMING_SNAKE_CASE__ : str=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3_2 , SCREAMING_SNAKE_CASE__ : Dict=1_2_8 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Dict=1E-6 , SCREAMING_SNAKE_CASE__ : Dict=0.001 , SCREAMING_SNAKE_CASE__ : Any=0.001 , SCREAMING_SNAKE_CASE__ : Optional[int]=1.0 , SCREAMING_SNAKE_CASE__ : Any="relu" , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : Optional[int]=1 , **SCREAMING_SNAKE_CASE__ : Dict , ) -> Optional[Any]: a_ : Optional[int] = vocab_size a_ : List[str] = d_model a_ : Tuple = d_kv a_ : Optional[Any] = d_ff a_ : List[Any] = num_sparse_encoder_layers a_ : Any = num_layers a_ : str = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry a_ : List[Any] = num_sparse_decoder_layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_encoder_layers > 0: a_ : Optional[int] = self.num_layers // self.num_sparse_encoder_layers else: a_ : List[Any] = self.num_layers # HACK: this will create 0 sparse layers # This tells us, each how many encoder layer we'll have to set a sparse layer. 
if self.num_sparse_decoder_layers > 0: a_ : Union[str, Any] = self.num_decoder_layers // self.num_sparse_decoder_layers else: a_ : List[str] = self.num_decoder_layers # HACK: this will create 0 sparse layers a_ : Dict = num_heads a_ : str = num_experts a_ : Any = expert_capacity a_ : List[Any] = router_bias a_ : str = router_jitter_noise if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(F"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" ) a_ : Optional[int] = router_dtype a_ : int = router_ignore_padding_tokens a_ : Any = relative_attention_num_buckets a_ : List[str] = relative_attention_max_distance a_ : Optional[Any] = dropout_rate a_ : Tuple = layer_norm_epsilon a_ : Dict = initializer_factor a_ : Any = feed_forward_proj a_ : Tuple = use_cache a_ : str = add_router_probs a_ : Optional[int] = router_z_loss_coef a_ : List[str] = router_aux_loss_coef a_ : int = self.feed_forward_proj.split('-' ) a_ : int = act_info[-1] a_ : Optional[int] = act_info[0] == 'gated' if len(SCREAMING_SNAKE_CASE__ ) > 1 and act_info[0] != "gated" or len(SCREAMING_SNAKE_CASE__ ) > 2: raise ValueError( F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ' '\'gated-gelu\' or \'relu\'' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": a_ : Any = 'gelu_new' super().__init__( pad_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
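# Hypothetical usage sketch, assuming the class above is `transformers.SwitchTransformersConfig`
# (names are mangled in this dump): the sparse-layer step is derived from
# num_layers // num_sparse_encoder_layers, as in the constructor above.
from transformers import SwitchTransformersConfig

config = SwitchTransformersConfig(num_layers=12, num_sparse_encoder_layers=3)
print(config.encoder_sparse_step)  # 4 -> every 4th encoder layer is a sparse MoE layer
print(config.num_experts)          # 8 experts per MoE layer by default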
32
1
from random import randint from tempfile import TemporaryFile import numpy as np def SCREAMING_SNAKE_CASE_ ( __A : Any , __A : List[str] , __A : List[str] ) -> Optional[int]: """simple docstring""" a_ : int = 0 if start < end: a_ : int = randint(__A , __A ) a_ : List[Any] = a[end] a_ : Optional[int] = a[pivot] a_ : Dict = temp a_ , a_ : List[str] = _in_place_partition(__A , __A , __A ) count += _in_place_quick_sort(__A , __A , p - 1 ) count += _in_place_quick_sort(__A , p + 1 , __A ) return count def SCREAMING_SNAKE_CASE_ ( __A : List[str] , __A : Dict , __A : Optional[Any] ) -> Optional[int]: """simple docstring""" a_ : List[str] = 0 a_ : int = randint(__A , __A ) a_ : Optional[int] = a[end] a_ : List[str] = a[pivot] a_ : Tuple = temp a_ : Optional[int] = start - 1 for index in range(__A , __A ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value a_ : List[Any] = new_pivot_index + 1 a_ : Dict = a[new_pivot_index] a_ : Optional[Any] = a[index] a_ : Any = temp a_ : str = a[new_pivot_index + 1] a_ : List[str] = a[end] a_ : Dict = temp return new_pivot_index + 1, count UpperCAmelCase_ : int = TemporaryFile() UpperCAmelCase_ : Dict = 100 # 1000 elements are to be sorted UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = 0, 1 # mean and standard deviation UpperCAmelCase_ : List[Any] = np.random.normal(mu, sigma, p) np.save(outfile, X) print('The array is') print(X) outfile.seek(0) # using the same array UpperCAmelCase_ : Any = np.load(outfile) UpperCAmelCase_ : Optional[int] = len(M) - 1 UpperCAmelCase_ : Tuple = _in_place_quick_sort(M, 0, r) print( 'No of Comparisons for 100 elements selected from a standard normal distribution' 'is :' ) print(z)
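# Self-contained sketch of the same idea as the module above (randomized in-place quicksort
# that counts comparisons), run on a small fixed list; the names here are illustrative.
from random import randint

def quick_sort_count(a: list, start: int, end: int) -> int:
    if start >= end:
        return 0
    pivot = randint(start, end)
    a[pivot], a[end] = a[end], a[pivot]            # move the pivot value to the end
    new_pivot, count = start - 1, 0
    for i in range(start, end):
        count += 1                                 # one comparison per scanned element
        if a[i] < a[end]:
            new_pivot += 1
            a[new_pivot], a[i] = a[i], a[new_pivot]
    a[new_pivot + 1], a[end] = a[end], a[new_pivot + 1]
    p = new_pivot + 1
    return count + quick_sort_count(a, start, p - 1) + quick_sort_count(a, p + 1, end)

data = [5, 3, 8, 1, 9, 2]
comparisons = quick_sort_count(data, 0, len(data) - 1)
print(data, comparisons)  # [1, 2, 3, 5, 8, 9] plus the number of comparisons made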
32
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool UpperCAmelCase_ : Tuple = { 'Acehnese Arabic': 'ace_Arab', 'Acehnese Latin': 'ace_Latn', 'Mesopotamian Arabic': 'acm_Arab', 'Ta\'izzi-Adeni Arabic': 'acq_Arab', 'Tunisian Arabic': 'aeb_Arab', 'Afrikaans': 'afr_Latn', 'South Levantine Arabic': 'ajp_Arab', 'Akan': 'aka_Latn', 'Amharic': 'amh_Ethi', 'North Levantine Arabic': 'apc_Arab', 'Modern Standard Arabic': 'arb_Arab', 'Modern Standard Arabic Romanized': 'arb_Latn', 'Najdi Arabic': 'ars_Arab', 'Moroccan Arabic': 'ary_Arab', 'Egyptian Arabic': 'arz_Arab', 'Assamese': 'asm_Beng', 'Asturian': 'ast_Latn', 'Awadhi': 'awa_Deva', 'Central Aymara': 'ayr_Latn', 'South Azerbaijani': 'azb_Arab', 'North Azerbaijani': 'azj_Latn', 'Bashkir': 'bak_Cyrl', 'Bambara': 'bam_Latn', 'Balinese': 'ban_Latn', 'Belarusian': 'bel_Cyrl', 'Bemba': 'bem_Latn', 'Bengali': 'ben_Beng', 'Bhojpuri': 'bho_Deva', 'Banjar Arabic': 'bjn_Arab', 'Banjar Latin': 'bjn_Latn', 'Standard Tibetan': 'bod_Tibt', 'Bosnian': 'bos_Latn', 'Buginese': 'bug_Latn', 'Bulgarian': 'bul_Cyrl', 'Catalan': 'cat_Latn', 'Cebuano': 'ceb_Latn', 'Czech': 'ces_Latn', 'Chokwe': 'cjk_Latn', 'Central Kurdish': 'ckb_Arab', 'Crimean Tatar': 'crh_Latn', 'Welsh': 'cym_Latn', 'Danish': 'dan_Latn', 'German': 'deu_Latn', 'Southwestern Dinka': 'dik_Latn', 'Dyula': 'dyu_Latn', 'Dzongkha': 'dzo_Tibt', 'Greek': 'ell_Grek', 'English': 'eng_Latn', 'Esperanto': 'epo_Latn', 'Estonian': 'est_Latn', 'Basque': 'eus_Latn', 'Ewe': 'ewe_Latn', 'Faroese': 'fao_Latn', 'Fijian': 'fij_Latn', 'Finnish': 'fin_Latn', 'Fon': 'fon_Latn', 'French': 'fra_Latn', 'Friulian': 'fur_Latn', 'Nigerian Fulfulde': 'fuv_Latn', 'Scottish Gaelic': 'gla_Latn', 'Irish': 'gle_Latn', 'Galician': 'glg_Latn', 'Guarani': 'grn_Latn', 'Gujarati': 'guj_Gujr', 'Haitian Creole': 'hat_Latn', 'Hausa': 'hau_Latn', 'Hebrew': 'heb_Hebr', 'Hindi': 'hin_Deva', 'Chhattisgarhi': 'hne_Deva', 'Croatian': 'hrv_Latn', 'Hungarian': 'hun_Latn', 'Armenian': 'hye_Armn', 'Igbo': 'ibo_Latn', 'Ilocano': 'ilo_Latn', 'Indonesian': 'ind_Latn', 'Icelandic': 'isl_Latn', 'Italian': 'ita_Latn', 'Javanese': 'jav_Latn', 'Japanese': 'jpn_Jpan', 'Kabyle': 'kab_Latn', 'Jingpho': 'kac_Latn', 'Kamba': 'kam_Latn', 'Kannada': 'kan_Knda', 'Kashmiri Arabic': 'kas_Arab', 'Kashmiri Devanagari': 'kas_Deva', 'Georgian': 'kat_Geor', 'Central Kanuri Arabic': 'knc_Arab', 'Central Kanuri Latin': 'knc_Latn', 'Kazakh': 'kaz_Cyrl', 'Kabiyè': 'kbp_Latn', 'Kabuverdianu': 'kea_Latn', 'Khmer': 'khm_Khmr', 'Kikuyu': 'kik_Latn', 'Kinyarwanda': 'kin_Latn', 'Kyrgyz': 'kir_Cyrl', 'Kimbundu': 'kmb_Latn', 'Northern Kurdish': 'kmr_Latn', 'Kikongo': 'kon_Latn', 'Korean': 'kor_Hang', 'Lao': 'lao_Laoo', 'Ligurian': 'lij_Latn', 'Limburgish': 'lim_Latn', 'Lingala': 'lin_Latn', 'Lithuanian': 'lit_Latn', 'Lombard': 'lmo_Latn', 'Latgalian': 'ltg_Latn', 'Luxembourgish': 'ltz_Latn', 'Luba-Kasai': 'lua_Latn', 'Ganda': 'lug_Latn', 'Luo': 
'luo_Latn', 'Mizo': 'lus_Latn', 'Standard Latvian': 'lvs_Latn', 'Magahi': 'mag_Deva', 'Maithili': 'mai_Deva', 'Malayalam': 'mal_Mlym', 'Marathi': 'mar_Deva', 'Minangkabau Arabic ': 'min_Arab', 'Minangkabau Latin': 'min_Latn', 'Macedonian': 'mkd_Cyrl', 'Plateau Malagasy': 'plt_Latn', 'Maltese': 'mlt_Latn', 'Meitei Bengali': 'mni_Beng', 'Halh Mongolian': 'khk_Cyrl', 'Mossi': 'mos_Latn', 'Maori': 'mri_Latn', 'Burmese': 'mya_Mymr', 'Dutch': 'nld_Latn', 'Norwegian Nynorsk': 'nno_Latn', 'Norwegian Bokmål': 'nob_Latn', 'Nepali': 'npi_Deva', 'Northern Sotho': 'nso_Latn', 'Nuer': 'nus_Latn', 'Nyanja': 'nya_Latn', 'Occitan': 'oci_Latn', 'West Central Oromo': 'gaz_Latn', 'Odia': 'ory_Orya', 'Pangasinan': 'pag_Latn', 'Eastern Panjabi': 'pan_Guru', 'Papiamento': 'pap_Latn', 'Western Persian': 'pes_Arab', 'Polish': 'pol_Latn', 'Portuguese': 'por_Latn', 'Dari': 'prs_Arab', 'Southern Pashto': 'pbt_Arab', 'Ayacucho Quechua': 'quy_Latn', 'Romanian': 'ron_Latn', 'Rundi': 'run_Latn', 'Russian': 'rus_Cyrl', 'Sango': 'sag_Latn', 'Sanskrit': 'san_Deva', 'Santali': 'sat_Olck', 'Sicilian': 'scn_Latn', 'Shan': 'shn_Mymr', 'Sinhala': 'sin_Sinh', 'Slovak': 'slk_Latn', 'Slovenian': 'slv_Latn', 'Samoan': 'smo_Latn', 'Shona': 'sna_Latn', 'Sindhi': 'snd_Arab', 'Somali': 'som_Latn', 'Southern Sotho': 'sot_Latn', 'Spanish': 'spa_Latn', 'Tosk Albanian': 'als_Latn', 'Sardinian': 'srd_Latn', 'Serbian': 'srp_Cyrl', 'Swati': 'ssw_Latn', 'Sundanese': 'sun_Latn', 'Swedish': 'swe_Latn', 'Swahili': 'swh_Latn', 'Silesian': 'szl_Latn', 'Tamil': 'tam_Taml', 'Tatar': 'tat_Cyrl', 'Telugu': 'tel_Telu', 'Tajik': 'tgk_Cyrl', 'Tagalog': 'tgl_Latn', 'Thai': 'tha_Thai', 'Tigrinya': 'tir_Ethi', 'Tamasheq Latin': 'taq_Latn', 'Tamasheq Tifinagh': 'taq_Tfng', 'Tok Pisin': 'tpi_Latn', 'Tswana': 'tsn_Latn', 'Tsonga': 'tso_Latn', 'Turkmen': 'tuk_Latn', 'Tumbuka': 'tum_Latn', 'Turkish': 'tur_Latn', 'Twi': 'twi_Latn', 'Central Atlas Tamazight': 'tzm_Tfng', 'Uyghur': 'uig_Arab', 'Ukrainian': 'ukr_Cyrl', 'Umbundu': 'umb_Latn', 'Urdu': 'urd_Arab', 'Northern Uzbek': 'uzn_Latn', 'Venetian': 'vec_Latn', 'Vietnamese': 'vie_Latn', 'Waray': 'war_Latn', 'Wolof': 'wol_Latn', 'Xhosa': 'xho_Latn', 'Eastern Yiddish': 'ydd_Hebr', 'Yoruba': 'yor_Latn', 'Yue Chinese': 'yue_Hant', 'Chinese Simplified': 'zho_Hans', 'Chinese Traditional': 'zho_Hant', 'Standard Malay': 'zsm_Latn', 'Zulu': 'zul_Latn', } class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : str = '''facebook/nllb-200-distilled-600M''' snake_case__ : Union[str, Any] = ( '''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should ''' '''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, ''' '''which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in ''' '''plain English, such as \'Romanian\', or \'Albanian\'. 
It returns the text translated in `tgt_lang`.''' ) snake_case__ : Optional[Any] = '''translator''' snake_case__ : Tuple = AutoTokenizer snake_case__ : Union[str, Any] = AutoModelForSeqaSeqLM snake_case__ : Dict = LANGUAGE_CODES snake_case__ : str = ['''text''', '''text''', '''text'''] snake_case__ : Tuple = ['''text'''] def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Tuple: if src_lang not in self.lang_to_code: raise ValueError(F"""{src_lang} is not a supported language.""" ) if tgt_lang not in self.lang_to_code: raise ValueError(F"""{tgt_lang} is not a supported language.""" ) a_ : str = self.lang_to_code[src_lang] a_ : Any = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( SCREAMING_SNAKE_CASE__ , return_tensors='pt' , src_lang=SCREAMING_SNAKE_CASE__ , tgt_lang=SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : Tuple ) -> Any: return self.model.generate(**SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[str] ) -> Dict: return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
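# Hypothetical usage sketch (heavy: downloads the ~600M NLLB checkpoint). The tool above maps
# plain-English language names to NLLB codes before generating; a similar result can be had
# directly with a translation pipeline and explicit language codes.
from transformers import pipeline

translator = pipeline(
    "translation",
    model="facebook/nllb-200-distilled-600M",
    src_lang="eng_Latn",   # "English" in the mapping above
    tgt_lang="ron_Latn",   # "Romanian" in the mapping above
)
print(translator("The weather is lovely today.")[0]["translation_text"])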
32
1
from collections import defaultdict def SCREAMING_SNAKE_CASE_ ( __A : int ) -> int: """simple docstring""" a_ : str = 1 a_ : Optional[Any] = True for v in tree[start]: if v not in visited: ret += dfs(__A ) if ret % 2 == 0: cuts.append(__A ) return ret def SCREAMING_SNAKE_CASE_ ( ) -> Dict: """simple docstring""" dfs(1 ) if __name__ == "__main__": UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = 10, 9 UpperCAmelCase_ : List[Any] = defaultdict(list) UpperCAmelCase_ : dict[int, bool] = {} UpperCAmelCase_ : list[int] = [] UpperCAmelCase_ : Any = 0 UpperCAmelCase_ : List[Any] = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)] for u, v in edges: tree[u].append(v) tree[v].append(u) even_tree() print(len(cuts) - 1)
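# Self-contained sketch of the algorithm above (count the edges that can be removed so every
# resulting component has an even number of nodes), using the same 10-node tree; the readable
# names stand in for the mangled ones.
from collections import defaultdict

tree = defaultdict(list)
for u, v in [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]:
    tree[u].append(v)
    tree[v].append(u)

visited = {}
cuts = []

def subtree_size(start: int) -> int:
    size = 1
    visited[start] = True
    for nxt in tree[start]:
        if nxt not in visited:
            size += subtree_size(nxt)
    if size % 2 == 0:
        cuts.append(start)  # the edge above `start` can be cut
    return size

subtree_size(1)
print(len(cuts) - 1)  # 2 edges can be removed for this tree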
32
UpperCAmelCase_ : Optional[int] = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] UpperCAmelCase_ : str = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] UpperCAmelCase_ : str = { 0: 'Sunday', 1: 'Monday', 2: 'Tuesday', 3: 'Wednesday', 4: 'Thursday', 5: 'Friday', 6: 'Saturday', } def SCREAMING_SNAKE_CASE_ ( __A : int , __A : int , __A : int ) -> str: """simple docstring""" assert len(str(__A ) ) > 2, "year should be in YYYY format" assert 1 <= month <= 12, "month should be between 1 to 12" assert 1 <= day <= 31, "day should be between 1 to 31" # Doomsday algorithm: a_ : List[str] = year // 1_00 a_ : Optional[int] = (5 * (century % 4) + 2) % 7 a_ : List[str] = year % 1_00 a_ : str = centurian % 12 a_ : List[str] = ( (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor ) % 7 a_ : Any = ( DOOMSDAY_NOT_LEAP[month - 1] if (year % 4 != 0) or (centurian == 0 and (year % 4_00) != 0) else DOOMSDAY_LEAP[month - 1] ) a_ : Any = (dooms_day + day - day_anchor) % 7 return WEEK_DAY_NAMES[week_day] if __name__ == "__main__": import doctest doctest.testmod()
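# Worked example of the doomsday computation above for 2020-06-19 (a Friday), written out with
# readable names in place of the mangled ones; the arithmetic mirrors the function step by step.
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {0: 'Sunday', 1: 'Monday', 2: 'Tuesday', 3: 'Wednesday', 4: 'Thursday', 5: 'Friday', 6: 'Saturday'}

year, month, day = 2020, 6, 19
century = year // 100                                  # 20
century_anchor = (5 * (century % 4) + 2) % 7           # 2 -> anchor day for the 2000s (Tuesday)
centurian = year % 100                                 # 20
centurian_m = centurian % 12                           # 8
year_anchor = ((centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor) % 7  # 6 -> Saturday
month_doomsday = DOOMSDAY_LEAP[month - 1]              # 6: June 6 falls on the year's anchor day
week_day = (year_anchor + day - month_doomsday) % 7    # (6 + 19 - 6) % 7 = 5
print(WEEK_DAY_NAMES[week_day])                        # Friday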
32
1
from ...configuration_utils import PretrainedConfig UpperCAmelCase_ : Any = { 'google/tapas-base-finetuned-sqa': ( 'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json' ), 'google/tapas-base-finetuned-wtq': ( 'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json' ), 'google/tapas-base-finetuned-wikisql-supervised': ( 'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json' ), 'google/tapas-base-finetuned-tabfact': ( 'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json' ), } class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : List[Any] = '''tapas''' def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int=3_0_5_2_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=7_6_8 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE__ : List[Any]=1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3_0_7_2 , SCREAMING_SNAKE_CASE__ : Optional[Any]="gelu" , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE__ : Tuple=1_0_2_4 , SCREAMING_SNAKE_CASE__ : List[str]=[3, 2_5_6, 2_5_6, 2, 2_5_6, 2_5_6, 1_0] , SCREAMING_SNAKE_CASE__ : str=0.02 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1E-12 , SCREAMING_SNAKE_CASE__ : Optional[int]=0 , SCREAMING_SNAKE_CASE__ : List[Any]=10.0 , SCREAMING_SNAKE_CASE__ : Optional[int]=0 , SCREAMING_SNAKE_CASE__ : Optional[int]=1.0 , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : List[str]=1.0 , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : int=1.0 , SCREAMING_SNAKE_CASE__ : int=1.0 , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False , SCREAMING_SNAKE_CASE__ : List[Any]="ratio" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : List[str]=6_4 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3_2 , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Tuple=None , **SCREAMING_SNAKE_CASE__ : str , ) -> str: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes) a_ : Any = vocab_size a_ : int = hidden_size a_ : List[str] = num_hidden_layers a_ : Optional[Any] = num_attention_heads a_ : Optional[int] = hidden_act a_ : List[Any] = intermediate_size a_ : Union[str, Any] = hidden_dropout_prob a_ : Union[str, Any] = attention_probs_dropout_prob a_ : Optional[Any] = max_position_embeddings a_ : int = type_vocab_sizes a_ : str = initializer_range a_ : Optional[Any] = layer_norm_eps # Fine-tuning task hyperparameters a_ : List[Any] = positive_label_weight a_ : int = num_aggregation_labels a_ : Union[str, Any] = aggregation_loss_weight a_ : Dict = use_answer_as_supervision a_ : Optional[int] = answer_loss_importance a_ : Any = use_normalized_answer_loss a_ : Optional[int] = huber_loss_delta a_ : Tuple = temperature a_ : str = aggregation_temperature a_ : Union[str, Any] = use_gumbel_for_cells a_ : Tuple = use_gumbel_for_aggregation a_ : Any = average_approximation_function a_ : Dict = cell_selection_preference a_ : Any = answer_loss_cutoff a_ : Dict = 
max_num_rows a_ : str = max_num_columns a_ : Union[str, Any] = average_logits_per_cell a_ : List[Any] = select_one_column a_ : Any = allow_empty_column_selection a_ : int = init_cell_selection_weights_to_zero a_ : List[Any] = reset_position_index_per_cell a_ : Tuple = disable_per_token_loss # Aggregation hyperparameters a_ : Union[str, Any] = aggregation_labels a_ : Optional[int] = no_aggregation_label_index if isinstance(self.aggregation_labels , SCREAMING_SNAKE_CASE__ ): a_ : Optional[Any] = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in aggregation_labels.items()}
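# Hypothetical usage sketch, assuming the class above is `transformers.TapasConfig` (names are
# mangled in this dump): the WTQ-style setup uses 4 aggregation labels with answer supervision.
from transformers import TapasConfig

config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)
print(config.type_vocab_sizes)        # [3, 256, 256, 2, 256, 256, 10]
print(config.num_aggregation_labels)  # 4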
32
import math import flax.linen as nn import jax.numpy as jnp def SCREAMING_SNAKE_CASE_ ( __A : jnp.ndarray , __A : int , __A : float = 1 , __A : float = 1 , __A : float = 1.0e4 , __A : bool = False , __A : float = 1.0 , ) -> jnp.ndarray: """simple docstring""" assert timesteps.ndim == 1, "Timesteps should be a 1d-array" assert embedding_dim % 2 == 0, F"""Embedding dimension {embedding_dim} should be even""" a_ : int = float(embedding_dim // 2 ) a_ : str = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift) a_ : Optional[int] = min_timescale * jnp.exp(jnp.arange(__A , dtype=jnp.floataa ) * -log_timescale_increment ) a_ : Optional[int] = jnp.expand_dims(__A , 1 ) * jnp.expand_dims(__A , 0 ) # scale embeddings a_ : str = scale * emb if flip_sin_to_cos: a_ : str = jnp.concatenate([jnp.cos(__A ), jnp.sin(__A )] , axis=1 ) else: a_ : Any = jnp.concatenate([jnp.sin(__A ), jnp.cos(__A )] , axis=1 ) a_ : Optional[int] = jnp.reshape(__A , [jnp.shape(__A )[0], embedding_dim] ) return signal class SCREAMING_SNAKE_CASE__ ( nn.Module ): snake_case__ : int = 32 snake_case__ : jnp.dtype = jnp.floataa @nn.compact def __call__( self : Tuple , SCREAMING_SNAKE_CASE__ : str ) -> List[str]: a_ : Optional[Any] = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='linear_1' )(SCREAMING_SNAKE_CASE__ ) a_ : Tuple = nn.silu(SCREAMING_SNAKE_CASE__ ) a_ : str = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='linear_2' )(SCREAMING_SNAKE_CASE__ ) return temb class SCREAMING_SNAKE_CASE__ ( nn.Module ): snake_case__ : int = 32 snake_case__ : bool = False snake_case__ : float = 1 @nn.compact def __call__( self : str , SCREAMING_SNAKE_CASE__ : int ) -> Tuple: return get_sinusoidal_embeddings( SCREAMING_SNAKE_CASE__ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
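# Self-contained sketch of the sinusoidal timestep embedding computed above, with readable names
# standing in for the mangled ones: embed 4 timesteps into 8 dimensions (freq_shift = 1).
import math
import jax.numpy as jnp

timesteps = jnp.array([0.0, 1.0, 10.0, 100.0])
embedding_dim, min_timescale, max_timescale = 8, 1.0, 1.0e4
num_timescales = embedding_dim // 2
log_increment = math.log(max_timescale / min_timescale) / (num_timescales - 1)
inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales) * -log_increment)
angles = timesteps[:, None] * inv_timescales[None, :]
emb = jnp.concatenate([jnp.sin(angles), jnp.cos(angles)], axis=1)  # sin first, as above
print(emb.shape)  # (4, 8)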
32
1
import flax.linen as nn import jax import jax.numpy as jnp class SCREAMING_SNAKE_CASE__ ( nn.Module ): snake_case__ : int snake_case__ : jnp.dtype = jnp.floataa def SCREAMING_SNAKE_CASE ( self : str ) -> int: a_ : Dict = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[int]: a_ , a_ , a_ , a_ : Union[str, Any] = hidden_states.shape a_ : List[str] = jax.image.resize( SCREAMING_SNAKE_CASE__ , shape=(batch, height * 2, width * 2, channels) , method='nearest' , ) a_ : Any = self.conv(SCREAMING_SNAKE_CASE__ ) return hidden_states class SCREAMING_SNAKE_CASE__ ( nn.Module ): snake_case__ : int snake_case__ : jnp.dtype = jnp.floataa def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: a_ : Optional[int] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self : Any , SCREAMING_SNAKE_CASE__ : int ) -> Optional[int]: # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim # hidden_states = jnp.pad(hidden_states, pad_width=pad) a_ : str = self.conv(SCREAMING_SNAKE_CASE__ ) return hidden_states class SCREAMING_SNAKE_CASE__ ( nn.Module ): snake_case__ : int snake_case__ : int = None snake_case__ : float = 0.0 snake_case__ : bool = None snake_case__ : jnp.dtype = jnp.floataa def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: a_ : List[str] = self.in_channels if self.out_channels is None else self.out_channels a_ : Optional[int] = nn.GroupNorm(num_groups=3_2 , epsilon=1E-5 ) a_ : Any = nn.Conv( SCREAMING_SNAKE_CASE__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) a_ : Optional[int] = nn.Dense(SCREAMING_SNAKE_CASE__ , dtype=self.dtype ) a_ : Union[str, Any] = nn.GroupNorm(num_groups=3_2 , epsilon=1E-5 ) a_ : int = nn.Dropout(self.dropout_prob ) a_ : Optional[Any] = nn.Conv( SCREAMING_SNAKE_CASE__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) a_ : List[str] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut a_ : List[Any] = None if use_nin_shortcut: a_ : Union[str, Any] = nn.Conv( SCREAMING_SNAKE_CASE__ , kernel_size=(1, 1) , strides=(1, 1) , padding='VALID' , dtype=self.dtype , ) def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any]=True ) -> int: a_ : List[Any] = hidden_states a_ : Any = self.norma(SCREAMING_SNAKE_CASE__ ) a_ : Any = nn.swish(SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = self.conva(SCREAMING_SNAKE_CASE__ ) a_ : int = self.time_emb_proj(nn.swish(SCREAMING_SNAKE_CASE__ ) ) a_ : List[str] = jnp.expand_dims(jnp.expand_dims(SCREAMING_SNAKE_CASE__ , 1 ) , 1 ) a_ : Optional[int] = hidden_states + temb a_ : List[str] = self.norma(SCREAMING_SNAKE_CASE__ ) a_ : Tuple = nn.swish(SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = self.dropout(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = self.conva(SCREAMING_SNAKE_CASE__ ) if self.conv_shortcut is not None: a_ : List[str] = self.conv_shortcut(SCREAMING_SNAKE_CASE__ ) return hidden_states + residual
32
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__) UpperCAmelCase_ : List[str] = OrderedDict( [ # Base model mapping ('albert', 'FlaxAlbertModel'), ('bart', 'FlaxBartModel'), ('beit', 'FlaxBeitModel'), ('bert', 'FlaxBertModel'), ('big_bird', 'FlaxBigBirdModel'), ('blenderbot', 'FlaxBlenderbotModel'), ('blenderbot-small', 'FlaxBlenderbotSmallModel'), ('clip', 'FlaxCLIPModel'), ('distilbert', 'FlaxDistilBertModel'), ('electra', 'FlaxElectraModel'), ('gpt-sw3', 'FlaxGPT2Model'), ('gpt2', 'FlaxGPT2Model'), ('gpt_neo', 'FlaxGPTNeoModel'), ('gptj', 'FlaxGPTJModel'), ('longt5', 'FlaxLongT5Model'), ('marian', 'FlaxMarianModel'), ('mbart', 'FlaxMBartModel'), ('mt5', 'FlaxMT5Model'), ('opt', 'FlaxOPTModel'), ('pegasus', 'FlaxPegasusModel'), ('regnet', 'FlaxRegNetModel'), ('resnet', 'FlaxResNetModel'), ('roberta', 'FlaxRobertaModel'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'), ('roformer', 'FlaxRoFormerModel'), ('t5', 'FlaxT5Model'), ('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'), ('vit', 'FlaxViTModel'), ('wav2vec2', 'FlaxWav2Vec2Model'), ('whisper', 'FlaxWhisperModel'), ('xglm', 'FlaxXGLMModel'), ('xlm-roberta', 'FlaxXLMRobertaModel'), ] ) UpperCAmelCase_ : str = OrderedDict( [ # Model for pre-training mapping ('albert', 'FlaxAlbertForPreTraining'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForPreTraining'), ('big_bird', 'FlaxBigBirdForPreTraining'), ('electra', 'FlaxElectraForPreTraining'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('t5', 'FlaxT5ForConditionalGeneration'), ('wav2vec2', 'FlaxWav2Vec2ForPreTraining'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) UpperCAmelCase_ : Dict = OrderedDict( [ # Model for Masked LM mapping ('albert', 'FlaxAlbertForMaskedLM'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForMaskedLM'), ('big_bird', 'FlaxBigBirdForMaskedLM'), ('distilbert', 'FlaxDistilBertForMaskedLM'), ('electra', 'FlaxElectraForMaskedLM'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) UpperCAmelCase_ : Optional[Any] = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ('bart', 'FlaxBartForConditionalGeneration'), ('blenderbot', 'FlaxBlenderbotForConditionalGeneration'), ('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'), ('encoder-decoder', 'FlaxEncoderDecoderModel'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('marian', 'FlaxMarianMTModel'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('pegasus', 'FlaxPegasusForConditionalGeneration'), ('t5', 'FlaxT5ForConditionalGeneration'), ] ) UpperCAmelCase_ : List[str] = OrderedDict( [ # Model for Image-classsification ('beit', 'FlaxBeitForImageClassification'), ('regnet', 'FlaxRegNetForImageClassification'), ('resnet', 'FlaxResNetForImageClassification'), ('vit', 'FlaxViTForImageClassification'), ] ) 
UpperCAmelCase_ : int = OrderedDict( [ ('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'), ] ) UpperCAmelCase_ : List[str] = OrderedDict( [ # Model for Causal LM mapping ('bart', 'FlaxBartForCausalLM'), ('bert', 'FlaxBertForCausalLM'), ('big_bird', 'FlaxBigBirdForCausalLM'), ('electra', 'FlaxElectraForCausalLM'), ('gpt-sw3', 'FlaxGPT2LMHeadModel'), ('gpt2', 'FlaxGPT2LMHeadModel'), ('gpt_neo', 'FlaxGPTNeoForCausalLM'), ('gptj', 'FlaxGPTJForCausalLM'), ('opt', 'FlaxOPTForCausalLM'), ('roberta', 'FlaxRobertaForCausalLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'), ('xglm', 'FlaxXGLMForCausalLM'), ('xlm-roberta', 'FlaxXLMRobertaForCausalLM'), ] ) UpperCAmelCase_ : List[str] = OrderedDict( [ # Model for Sequence Classification mapping ('albert', 'FlaxAlbertForSequenceClassification'), ('bart', 'FlaxBartForSequenceClassification'), ('bert', 'FlaxBertForSequenceClassification'), ('big_bird', 'FlaxBigBirdForSequenceClassification'), ('distilbert', 'FlaxDistilBertForSequenceClassification'), ('electra', 'FlaxElectraForSequenceClassification'), ('mbart', 'FlaxMBartForSequenceClassification'), ('roberta', 'FlaxRobertaForSequenceClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'), ('roformer', 'FlaxRoFormerForSequenceClassification'), ('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'), ] ) UpperCAmelCase_ : List[str] = OrderedDict( [ # Model for Question Answering mapping ('albert', 'FlaxAlbertForQuestionAnswering'), ('bart', 'FlaxBartForQuestionAnswering'), ('bert', 'FlaxBertForQuestionAnswering'), ('big_bird', 'FlaxBigBirdForQuestionAnswering'), ('distilbert', 'FlaxDistilBertForQuestionAnswering'), ('electra', 'FlaxElectraForQuestionAnswering'), ('mbart', 'FlaxMBartForQuestionAnswering'), ('roberta', 'FlaxRobertaForQuestionAnswering'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'), ('roformer', 'FlaxRoFormerForQuestionAnswering'), ('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'), ] ) UpperCAmelCase_ : Union[str, Any] = OrderedDict( [ # Model for Token Classification mapping ('albert', 'FlaxAlbertForTokenClassification'), ('bert', 'FlaxBertForTokenClassification'), ('big_bird', 'FlaxBigBirdForTokenClassification'), ('distilbert', 'FlaxDistilBertForTokenClassification'), ('electra', 'FlaxElectraForTokenClassification'), ('roberta', 'FlaxRobertaForTokenClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'), ('roformer', 'FlaxRoFormerForTokenClassification'), ('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'), ] ) UpperCAmelCase_ : Dict = OrderedDict( [ # Model for Multiple Choice mapping ('albert', 'FlaxAlbertForMultipleChoice'), ('bert', 'FlaxBertForMultipleChoice'), ('big_bird', 'FlaxBigBirdForMultipleChoice'), ('distilbert', 'FlaxDistilBertForMultipleChoice'), ('electra', 'FlaxElectraForMultipleChoice'), ('roberta', 'FlaxRobertaForMultipleChoice'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'), ('roformer', 'FlaxRoFormerForMultipleChoice'), ('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'), ] ) UpperCAmelCase_ : List[str] = OrderedDict( [ ('bert', 'FlaxBertForNextSentencePrediction'), ] ) UpperCAmelCase_ : Dict = OrderedDict( [ ('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ] ) UpperCAmelCase_ : Union[str, Any] = OrderedDict( [ ('whisper', 'FlaxWhisperForAudioClassification'), ] ) UpperCAmelCase_ : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, 
FLAX_MODEL_MAPPING_NAMES) UpperCAmelCase_ : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) UpperCAmelCase_ : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) UpperCAmelCase_ : List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) UpperCAmelCase_ : int = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) UpperCAmelCase_ : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) UpperCAmelCase_ : Dict = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ : Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) UpperCAmelCase_ : List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ : int = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) UpperCAmelCase_ : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) UpperCAmelCase_ : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) UpperCAmelCase_ : Optional[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : List[Any] = FLAX_MODEL_MAPPING UpperCAmelCase_ : Tuple = auto_class_update(FlaxAutoModel) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Any = FLAX_MODEL_FOR_PRETRAINING_MAPPING UpperCAmelCase_ : Optional[Any] = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : List[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING UpperCAmelCase_ : Optional[Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Optional[Any] = FLAX_MODEL_FOR_MASKED_LM_MAPPING UpperCAmelCase_ : Union[str, Any] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Tuple = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING UpperCAmelCase_ : Optional[int] = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base' ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Tuple = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING UpperCAmelCase_ : Optional[Any] = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc='sequence classification' ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Tuple = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING UpperCAmelCase_ : str = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : List[str] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING UpperCAmelCase_ : Tuple = auto_class_update( FlaxAutoModelForTokenClassification, head_doc='token classification' ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Dict = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING UpperCAmelCase_ : int = 
auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Optional[int] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING UpperCAmelCase_ : Dict = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction' ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING UpperCAmelCase_ : str = auto_class_update( FlaxAutoModelForImageClassification, head_doc='image classification' ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Optional[Any] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase_ : Tuple = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Optional[int] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING UpperCAmelCase_ : Union[str, Any] = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling' )
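# Hypothetical usage sketch (downloads weights): the auto classes defined above pick the matching
# Flax architecture from the checkpoint's config; "bert-base-uncased" is an assumed example.
from transformers import AutoTokenizer, FlaxAutoModelForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = FlaxAutoModelForMaskedLM.from_pretrained("bert-base-uncased")  # resolves to FlaxBertForMaskedLM
inputs = tokenizer("The capital of France is [MASK].", return_tensors="np")
outputs = model(**inputs)
print(outputs.logits.shape)  # (1, sequence_length, 30522)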
32
1
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING UpperCAmelCase_ : int = logging.get_logger(__name__) @add_end_docstrings(lowercase__ ) class SCREAMING_SNAKE_CASE__ ( lowercase__ ): def __init__( self : List[str] , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Any ) -> List[Any]: super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ) def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : Dict=None ) -> List[Any]: a_ : Tuple = {} if top_k is not None: a_ : int = top_k return {}, {}, postprocess_params def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Tuple: return super().__call__(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Any , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Union[str, Any]: a_ : List[str] = load_image(SCREAMING_SNAKE_CASE__ ) a_ : int = self.image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors=self.framework ) return model_inputs def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : Dict ) -> Dict: a_ : str = self.model(**SCREAMING_SNAKE_CASE__ ) return model_outputs def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str=5 ) -> Any: if top_k > self.model.config.num_labels: a_ : Dict = self.model.config.num_labels if self.framework == "pt": a_ : Any = model_outputs.logits.softmax(-1 )[0] a_ , a_ : List[str] = probs.topk(SCREAMING_SNAKE_CASE__ ) elif self.framework == "tf": a_ : Optional[int] = stable_softmax(model_outputs.logits , axis=-1 )[0] a_ : Optional[Any] = tf.math.top_k(SCREAMING_SNAKE_CASE__ , k=SCREAMING_SNAKE_CASE__ ) a_ , a_ : str = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(F"""Unsupported framework: {self.framework}""" ) a_ : Optional[int] = scores.tolist() a_ : Any = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )]
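# Hypothetical usage sketch (downloads a ViT checkpoint): the class above is what
# `pipeline("image-classification")` instantiates; the image URL is the usual COCO example.
from transformers import pipeline

classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
preds = classifier("http://images.cocodataset.org/val2017/000000039769.jpg", top_k=3)
print(preds)  # e.g. [{'score': ..., 'label': 'Egyptian cat'}, ...]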
32
import json import os import unittest from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import ( VOCAB_FILES_NAMES, GPTSanJapaneseTokenizer, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class SCREAMING_SNAKE_CASE__ ( lowercase__ , unittest.TestCase ): snake_case__ : Any = GPTSanJapaneseTokenizer snake_case__ : Tuple = False snake_case__ : str = {'''do_clean_text''': False, '''add_prefix_space''': False} def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str: super().setUp() # fmt: off a_ : Union[str, Any] = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>'] # fmt: on a_ : int = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀 a_ : List[Any] = {'unk_token': '<unk>'} a_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) a_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) with open(self.emoji_file , 'w' ) as emoji_writer: emoji_writer.write(json.dumps(SCREAMING_SNAKE_CASE__ ) ) def SCREAMING_SNAKE_CASE ( self : List[str] , **SCREAMING_SNAKE_CASE__ : Tuple ) -> int: kwargs.update(self.special_tokens_map ) return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> int: a_ : Optional[int] = 'こんにちは、世界。 \nこんばんは、㔺界。😀' a_ : List[str] = 'こんにちは、世界。 \nこんばんは、世界。😀' return input_text, output_text def SCREAMING_SNAKE_CASE ( self : str , SCREAMING_SNAKE_CASE__ : int ) -> Dict: a_ , a_ : Union[str, Any] = self.get_input_output_texts(SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) a_ : Dict = tokenizer.decode(SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ ) return text, ids def SCREAMING_SNAKE_CASE ( self : Dict ) -> int: pass # TODO add if relevant def SCREAMING_SNAKE_CASE ( self : Dict ) -> Any: pass # TODO add if relevant def SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple: pass # TODO add if relevant def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]: a_ : List[str] = self.get_tokenizer() # Testing tokenization a_ : List[Any] = 'こんにちは、世界。 こんばんは、㔺界。' a_ : Optional[int] = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。'] a_ : Dict = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Testing conversion to ids without special tokens a_ : Tuple = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] a_ : List[Any] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Testing conversion to ids with special tokens a_ : int = tokens + [tokenizer.unk_token] a_ : int = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 1_9] a_ : Tuple = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict: a_ : Union[str, Any] = self.get_tokenizer() # Testing tokenization a_ : Dict = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。' a_ : List[Any] = 
'こんにちは、、、、世界。こんばんは、、、、世界。' a_ : Any = tokenizer.encode(SCREAMING_SNAKE_CASE__ ) a_ : Optional[Any] = tokenizer.decode(SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @slow def SCREAMING_SNAKE_CASE ( self : str ) -> Dict: a_ : Tuple = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) # Testing tokenization a_ : List[Any] = 'こんにちは、世界。' a_ : int = 'こんばんは、㔺界。😀' a_ : Dict = 'こんにちは、世界。こんばんは、世界。😀' a_ : Optional[int] = tokenizer.encode(prefix_text + input_text ) a_ : Any = tokenizer.encode('' , prefix_text=prefix_text + input_text ) a_ : Union[str, Any] = tokenizer.encode(SCREAMING_SNAKE_CASE__ , prefix_text=SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = tokenizer.decode(SCREAMING_SNAKE_CASE__ ) a_ : Tuple = tokenizer.decode(SCREAMING_SNAKE_CASE__ ) a_ : str = tokenizer.decode(SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @slow def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: a_ : Tuple = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) # Testing tokenization a_ : str = 'こんにちは、世界。' a_ : List[str] = 'こんばんは、㔺界。😀' a_ : str = len(tokenizer.encode(SCREAMING_SNAKE_CASE__ ) ) - 2 a_ : Tuple = len(tokenizer.encode(SCREAMING_SNAKE_CASE__ ) ) - 2 a_ : Optional[Any] = [1] + [0] * (len_prefix + len_text + 1) a_ : Optional[Any] = [1] * (len_prefix + len_text + 1) + [0] a_ : Tuple = [1] + [1] * (len_prefix) + [0] * (len_text + 1) a_ : List[str] = tokenizer(prefix_text + input_text ).token_type_ids a_ : Union[str, Any] = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids a_ : Any = tokenizer(SCREAMING_SNAKE_CASE__ , prefix_text=SCREAMING_SNAKE_CASE__ ).token_type_ids self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @slow def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: a_ : str = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) a_ : Optional[int] = tokenizer.encode('あンいワ' ) a_ : Dict = tokenizer.encode('' , prefix_text='あンいワ' ) a_ : Dict = tokenizer.encode('いワ' , prefix_text='あン' ) self.assertEqual(tokenizer.decode(SCREAMING_SNAKE_CASE__ ) , tokenizer.decode(SCREAMING_SNAKE_CASE__ ) ) self.assertEqual(tokenizer.decode(SCREAMING_SNAKE_CASE__ ) , tokenizer.decode(SCREAMING_SNAKE_CASE__ ) ) self.assertNotEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertNotEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token @slow def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: a_ : List[str] = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) a_ : Optional[Any] = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']] a_ : List[str] = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ ) a_ : Dict = tokenizer.batch_encode_plus(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ ) # fmt: off a_ : List[Any] = [[3_5_9_9_3, 8_6_4_0, 2_5_9_4_8, 3_5_9_9_8, 3_0_6_4_7, 3_5_6_7_5, 3_5_9_9_9, 3_5_9_9_9], [3_5_9_9_3, 1_0_3_8_2, 9_8_6_8, 3_5_9_9_8, 3_0_6_4_6, 9_4_5_9, 3_0_6_4_6, 3_5_6_7_5]] a_ : Any = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]] a_ : List[Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 
1]] # fmt: on self.assertListEqual(x_token.input_ids , SCREAMING_SNAKE_CASE__ ) self.assertListEqual(x_token.token_type_ids , SCREAMING_SNAKE_CASE__ ) self.assertListEqual(x_token.attention_mask , SCREAMING_SNAKE_CASE__ ) self.assertListEqual(x_token_a.input_ids , SCREAMING_SNAKE_CASE__ ) self.assertListEqual(x_token_a.token_type_ids , SCREAMING_SNAKE_CASE__ ) self.assertListEqual(x_token_a.attention_mask , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: # Intentionally convert some words to accommodate character fluctuations unique to Japanese pass def SCREAMING_SNAKE_CASE ( self : Dict ) -> int: # tokenizer has no padding token pass
32
1
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL UpperCAmelCase_ : List[str] = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Optional[Any] = ['''pixel_values'''] def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : int = 0.9 , SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : Union[int, float] = 1 / 2_5_5 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , **SCREAMING_SNAKE_CASE__ : str , ) -> None: super().__init__(**SCREAMING_SNAKE_CASE__ ) a_ : str = size if size is not None else {'shortest_edge': 2_2_4} a_ : Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ ) a_ : Optional[Any] = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4} a_ : Optional[Any] = get_size_dict(SCREAMING_SNAKE_CASE__ , param_name='crop_size' ) a_ : List[Any] = do_resize a_ : str = size a_ : List[str] = crop_pct a_ : Union[str, Any] = resample a_ : Optional[Any] = do_center_crop a_ : List[Any] = crop_size a_ : str = do_rescale a_ : str = rescale_factor a_ : Optional[Any] = do_normalize a_ : Dict = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN a_ : Optional[int] = image_std if image_std is not None else IMAGENET_DEFAULT_STD def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Dict[str, int] , SCREAMING_SNAKE_CASE__ : Optional[float] = None , SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Any , ) -> np.ndarray: a_ : int = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ ) if "shortest_edge" not in size and ("height" not in size or "width" not in size): raise ValueError(F"""size must contain 'height' and 'width' or 'shortest_edge' as keys. 
Got {size.keys()}""" ) if crop_pct is not None: if "shortest_edge" in size: a_ : Dict = int(size['shortest_edge'] / crop_pct ) elif "height" in size and "width" in size: if size["height"] == size["width"]: a_ : int = int(size['height'] / crop_pct ) else: a_ : List[str] = (int(size['height'] / crop_pct ), int(size['width'] / crop_pct )) else: raise ValueError('Invalid size for resize: {}'.format(SCREAMING_SNAKE_CASE__ ) ) a_ : Union[str, Any] = get_resize_output_image_size(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ ) else: if "shortest_edge" in size: a_ : List[Any] = get_resize_output_image_size(SCREAMING_SNAKE_CASE__ , size=size['shortest_edge'] , default_to_square=SCREAMING_SNAKE_CASE__ ) elif "height" in size and "width" in size: a_ : Dict = (size['height'], size['width']) else: raise ValueError('Invalid size for resize: {}'.format(SCREAMING_SNAKE_CASE__ ) ) return resize(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Dict[str, int] , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> np.ndarray: a_ : Optional[int] = get_size_dict(SCREAMING_SNAKE_CASE__ ) if "height" not in size or "width" not in size: raise ValueError(F"""size must contain 'height' and 'width' as keys. Got {size.keys()}""" ) return center_crop(SCREAMING_SNAKE_CASE__ , size=(size['height'], size['width']) , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Union[int, float] , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Dict , ) -> Tuple: return rescale(SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Union[float, List[float]] , SCREAMING_SNAKE_CASE__ : Union[float, List[float]] , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> np.ndarray: return normalize(SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Any , SCREAMING_SNAKE_CASE__ : ImageInput , SCREAMING_SNAKE_CASE__ : bool = None , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : int = None , SCREAMING_SNAKE_CASE__ : PILImageResampling = None , SCREAMING_SNAKE_CASE__ : bool = None , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : bool = None , SCREAMING_SNAKE_CASE__ : float = None , SCREAMING_SNAKE_CASE__ : bool = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE__ : ChannelDimension = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ : Tuple , ) -> PIL.Image.Image: a_ : Dict = do_resize if do_resize is not None else self.do_resize a_ : int = crop_pct if crop_pct is not None else self.crop_pct a_ : Dict = resample if resample is not None else 
self.resample a_ : str = do_center_crop if do_center_crop is not None else self.do_center_crop a_ : str = do_rescale if do_rescale is not None else self.do_rescale a_ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor a_ : int = do_normalize if do_normalize is not None else self.do_normalize a_ : str = image_mean if image_mean is not None else self.image_mean a_ : List[str] = image_std if image_std is not None else self.image_std a_ : List[Any] = size if size is not None else self.size a_ : Optional[int] = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ ) a_ : str = crop_size if crop_size is not None else self.crop_size a_ : Optional[int] = get_size_dict(SCREAMING_SNAKE_CASE__ , param_name='crop_size' ) a_ : List[str] = make_list_of_images(SCREAMING_SNAKE_CASE__ ) if not valid_images(SCREAMING_SNAKE_CASE__ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_center_crop and crop_pct is None: raise ValueError('Crop_pct must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. a_ : int = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for image in images] if do_resize: a_ : Union[str, Any] = [self.resize(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , crop_pct=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ ) for image in images] if do_center_crop: a_ : Optional[int] = [self.center_crop(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ ) for image in images] if do_rescale: a_ : List[str] = [self.rescale(image=SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ ) for image in images] if do_normalize: a_ : int = [self.normalize(image=SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ ) for image in images] a_ : List[Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images] a_ : Tuple = {'pixel_values': images} return BatchFeature(data=SCREAMING_SNAKE_CASE__ , tensor_type=SCREAMING_SNAKE_CASE__ )
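# Hypothetical usage sketch, assuming the class above is `transformers.PoolFormerImageProcessor`
# (names are mangled in this dump): resize using crop_pct, then center-crop to 224x224.
import numpy as np
from transformers import PoolFormerImageProcessor

processor = PoolFormerImageProcessor(size={"shortest_edge": 224}, crop_pct=0.9)
batch = processor(images=np.zeros((480, 640, 3), dtype=np.uint8), return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)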
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Union[str, Any] = ['''pixel_values'''] def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, int]] = None , SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Union[int, float] = 1 / 2_5_5 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , **SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> None: super().__init__(**SCREAMING_SNAKE_CASE__ ) a_ : str = size if size is not None else {'shortest_edge': 2_5_6} a_ : Any = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ ) a_ : Dict = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4} a_ : Optional[int] = get_size_dict(SCREAMING_SNAKE_CASE__ ) a_ : List[str] = do_resize a_ : Dict = size a_ : Optional[Any] = resample a_ : Optional[int] = do_center_crop a_ : Dict = crop_size a_ : int = do_rescale a_ : int = rescale_factor a_ : Tuple = do_normalize a_ : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN a_ : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Dict[str, int] , SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> np.ndarray: a_ : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. 
Got {size.keys()}""" ) a_ : Tuple = get_resize_output_image_size(SCREAMING_SNAKE_CASE__ , size=size['shortest_edge'] , default_to_square=SCREAMING_SNAKE_CASE__ ) return resize(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Dict[str, int] , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> np.ndarray: a_ : str = get_size_dict(SCREAMING_SNAKE_CASE__ ) return center_crop(SCREAMING_SNAKE_CASE__ , size=(size['height'], size['width']) , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> np.ndarray: return rescale(SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Union[float, List[float]] , SCREAMING_SNAKE_CASE__ : Union[float, List[float]] , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : List[str] , ) -> np.ndarray: return normalize(SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : ImageInput , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : PILImageResampling = None , SCREAMING_SNAKE_CASE__ : bool = None , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[float] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> Union[str, Any]: a_ : List[str] = do_resize if do_resize is not None else self.do_resize a_ : Dict = size if size is not None else self.size a_ : Dict = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = resample if resample is not None else self.resample a_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop a_ : int = crop_size if crop_size is not None else self.crop_size a_ : Optional[int] = get_size_dict(SCREAMING_SNAKE_CASE__ ) a_ : Dict = do_rescale if do_rescale is not None else self.do_rescale a_ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor a_ : Any = do_normalize if do_normalize is not None else self.do_normalize a_ : str = image_mean if image_mean is not None else self.image_mean a_ : Dict = image_std if image_std is not None else self.image_std a_ : Optional[int] = make_list_of_images(SCREAMING_SNAKE_CASE__ ) if not valid_images(SCREAMING_SNAKE_CASE__ ): raise ValueError( 'Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. a_ : Any = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for image in images] if do_resize: a_ : str = [self.resize(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ ) for image in images] if do_center_crop: a_ : int = [self.center_crop(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ ) for image in images] if do_rescale: a_ : Optional[Any] = [self.rescale(image=SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ ) for image in images] if do_normalize: a_ : List[Any] = [self.normalize(image=SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ ) for image in images] a_ : Dict = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images] a_ : Tuple = {'pixel_values': images} return BatchFeature(data=SCREAMING_SNAKE_CASE__ , tensor_type=SCREAMING_SNAKE_CASE__ )
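With its defaults (shortest_edge 256, crop 224, rescale 1/255, IMAGENET_STANDARD_MEAN/STD) this processor is the classic ImageNet evaluation transform. A small NumPy sketch of the rescale and normalize steps plus the channels-first conversion; the 0.5 mean/std constants correspond to IMAGENET_STANDARD_MEAN and IMAGENET_STANDARD_STD:

import numpy as np

MEAN = np.array([0.5, 0.5, 0.5], dtype=np.float32)  # IMAGENET_STANDARD_MEAN
STD = np.array([0.5, 0.5, 0.5], dtype=np.float32)   # IMAGENET_STANDARD_STD

def rescale_and_normalize(image_uint8: np.ndarray) -> np.ndarray:
    # image_uint8: (height, width, 3) uint8 pixels after resize + centre crop
    scaled = image_uint8.astype(np.float32) * (1 / 255)  # do_rescale with rescale_factor = 1/255
    normalized = (scaled - MEAN) / STD                    # do_normalize
    return np.transpose(normalized, (2, 0, 1))            # ChannelDimension.FIRST -> (3, H, W)

pixels = rescale_and_normalize(np.zeros((224, 224, 3), dtype=np.uint8))
print(pixels.shape)  # (3, 224, 224)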
from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent UpperCAmelCase_ : Any = {'UserAgent': UserAgent().random} def SCREAMING_SNAKE_CASE_ ( __A : Optional[int] ) -> dict: """simple docstring""" a_ : Tuple = script.contents[0] a_ : int = json.loads(data[data.find('{"config"' ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class SCREAMING_SNAKE_CASE__ : def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[Any]: a_ : Tuple = F"""https://www.instagram.com/{username}/""" a_ : Optional[Any] = self.get_json() def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> dict: a_ : Any = requests.get(self.url , headers=SCREAMING_SNAKE_CASE__ ).text a_ : Dict = BeautifulSoup(SCREAMING_SNAKE_CASE__ , 'html.parser' ).find_all('script' ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self : Union[str, Any] ) -> str: return F"""{self.__class__.__name__}('{self.username}')""" def __str__( self : Optional[int] ) -> str: return F"""{self.fullname} ({self.username}) is {self.biography}""" @property def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str: return self.user_data["username"] @property def SCREAMING_SNAKE_CASE ( self : str ) -> str: return self.user_data["full_name"] @property def SCREAMING_SNAKE_CASE ( self : Any ) -> str: return self.user_data["biography"] @property def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: return self.user_data["business_email"] @property def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: return self.user_data["external_url"] @property def SCREAMING_SNAKE_CASE ( self : Dict ) -> int: return self.user_data["edge_followed_by"]["count"] @property def SCREAMING_SNAKE_CASE ( self : Any ) -> int: return self.user_data["edge_follow"]["count"] @property def SCREAMING_SNAKE_CASE ( self : str ) -> int: return self.user_data["edge_owner_to_timeline_media"]["count"] @property def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: return self.user_data["profile_pic_url_hd"] @property def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> bool: return self.user_data["is_verified"] @property def SCREAMING_SNAKE_CASE ( self : Any ) -> bool: return self.user_data["is_private"] def SCREAMING_SNAKE_CASE_ ( __A : str = "github" ) -> None: """simple docstring""" import os if os.environ.get('CI' ): return # test failing on GitHub Actions a_ : int = InstagramUser(__A ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , __A ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." assert instagram_user.number_of_posts > 1_50 assert instagram_user.number_of_followers > 12_00_00 assert instagram_user.number_of_followings > 15 assert instagram_user.email == "support@github.com" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith('https://instagram.' 
) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase_ : Union[str, Any] = InstagramUser('github') print(instagram_user) print(F'{instagram_user.number_of_posts = }') print(F'{instagram_user.number_of_followers = }') print(F'{instagram_user.number_of_followings = }') print(F'{instagram_user.email = }') print(F'{instagram_user.website = }') print(F'{instagram_user.profile_picture_url = }') print(F'{instagram_user.is_verified = }') print(F'{instagram_user.is_private = }')
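The scraper works by fetching the profile page, locating the inline script tag that embeds the shared-data JSON, and slicing the payload out between '{"config"' and the trailing semicolon; Instagram has changed this page structure over time, so the approach is fragile. The extraction step in isolation, with a made-up script body standing in for script.contents[0]:

import json

script_text = (
    'window._sharedData = {"config": {}, "entry_data": '
    '{"ProfilePage": [{"graphql": {"user": {"username": "github"}}}]}};'
)
payload = json.loads(script_text[script_text.find('{"config"') : -1])  # drop the trailing ';'
print(payload["entry_data"]["ProfilePage"][0]["graphql"]["user"]["username"])  # github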
def SCREAMING_SNAKE_CASE_ ( __A : list[int] , __A : str ) -> list[int]: """simple docstring""" a_ : Any = int(__A ) # Initialize Result a_ : Tuple = [] # Traverse through all denomination for denomination in reversed(__A ): # Find denominations while int(__A ) >= int(__A ): total_value -= int(__A ) answer.append(__A ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": UpperCAmelCase_ : Union[str, Any] = [] UpperCAmelCase_ : Union[str, Any] = '0' if ( input('Do you want to enter your denominations ? (yY/n): ').strip().lower() == "y" ): UpperCAmelCase_ : List[Any] = int(input('Enter the number of denominations you want to add: ').strip()) for i in range(0, n): denominations.append(int(input(F'Denomination {i}: ').strip())) UpperCAmelCase_ : str = input('Enter the change you want to make in Indian Currency: ').strip() else: # All denominations of Indian Currency if user does not enter UpperCAmelCase_ : List[Any] = [1, 2, 5, 10, 20, 50, 100, 500, 2000] UpperCAmelCase_ : str = input('Enter the change you want to make: ').strip() if int(value) == 0 or int(value) < 0: print('The total value cannot be zero or negative.') else: print(F'Following is minimal change for {value}: ') UpperCAmelCase_ : Optional[Any] = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=' ')
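This is the textbook greedy change-making algorithm: walk the denominations from largest to smallest and take each coin as long as it still fits. Greedy is optimal for canonical coin systems such as the Indian denominations in the driver code, but not for arbitrary denomination sets. A compact standalone version with a worked call (the original takes the amount as a string only because it comes straight from input()):

def greedy_change(denominations: list[int], total: int) -> list[int]:
    # denominations are assumed sorted in ascending order, as in the driver code above
    answer = []
    for coin in reversed(denominations):
        while total >= coin:
            total -= coin
            answer.append(coin)
    return answer

print(greedy_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], 987))
# [500, 100, 100, 100, 100, 50, 20, 10, 5, 2, 1]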
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool UpperCAmelCase_ : Tuple = { 'Acehnese Arabic': 'ace_Arab', 'Acehnese Latin': 'ace_Latn', 'Mesopotamian Arabic': 'acm_Arab', 'Ta\'izzi-Adeni Arabic': 'acq_Arab', 'Tunisian Arabic': 'aeb_Arab', 'Afrikaans': 'afr_Latn', 'South Levantine Arabic': 'ajp_Arab', 'Akan': 'aka_Latn', 'Amharic': 'amh_Ethi', 'North Levantine Arabic': 'apc_Arab', 'Modern Standard Arabic': 'arb_Arab', 'Modern Standard Arabic Romanized': 'arb_Latn', 'Najdi Arabic': 'ars_Arab', 'Moroccan Arabic': 'ary_Arab', 'Egyptian Arabic': 'arz_Arab', 'Assamese': 'asm_Beng', 'Asturian': 'ast_Latn', 'Awadhi': 'awa_Deva', 'Central Aymara': 'ayr_Latn', 'South Azerbaijani': 'azb_Arab', 'North Azerbaijani': 'azj_Latn', 'Bashkir': 'bak_Cyrl', 'Bambara': 'bam_Latn', 'Balinese': 'ban_Latn', 'Belarusian': 'bel_Cyrl', 'Bemba': 'bem_Latn', 'Bengali': 'ben_Beng', 'Bhojpuri': 'bho_Deva', 'Banjar Arabic': 'bjn_Arab', 'Banjar Latin': 'bjn_Latn', 'Standard Tibetan': 'bod_Tibt', 'Bosnian': 'bos_Latn', 'Buginese': 'bug_Latn', 'Bulgarian': 'bul_Cyrl', 'Catalan': 'cat_Latn', 'Cebuano': 'ceb_Latn', 'Czech': 'ces_Latn', 'Chokwe': 'cjk_Latn', 'Central Kurdish': 'ckb_Arab', 'Crimean Tatar': 'crh_Latn', 'Welsh': 'cym_Latn', 'Danish': 'dan_Latn', 'German': 'deu_Latn', 'Southwestern Dinka': 'dik_Latn', 'Dyula': 'dyu_Latn', 'Dzongkha': 'dzo_Tibt', 'Greek': 'ell_Grek', 'English': 'eng_Latn', 'Esperanto': 'epo_Latn', 'Estonian': 'est_Latn', 'Basque': 'eus_Latn', 'Ewe': 'ewe_Latn', 'Faroese': 'fao_Latn', 'Fijian': 'fij_Latn', 'Finnish': 'fin_Latn', 'Fon': 'fon_Latn', 'French': 'fra_Latn', 'Friulian': 'fur_Latn', 'Nigerian Fulfulde': 'fuv_Latn', 'Scottish Gaelic': 'gla_Latn', 'Irish': 'gle_Latn', 'Galician': 'glg_Latn', 'Guarani': 'grn_Latn', 'Gujarati': 'guj_Gujr', 'Haitian Creole': 'hat_Latn', 'Hausa': 'hau_Latn', 'Hebrew': 'heb_Hebr', 'Hindi': 'hin_Deva', 'Chhattisgarhi': 'hne_Deva', 'Croatian': 'hrv_Latn', 'Hungarian': 'hun_Latn', 'Armenian': 'hye_Armn', 'Igbo': 'ibo_Latn', 'Ilocano': 'ilo_Latn', 'Indonesian': 'ind_Latn', 'Icelandic': 'isl_Latn', 'Italian': 'ita_Latn', 'Javanese': 'jav_Latn', 'Japanese': 'jpn_Jpan', 'Kabyle': 'kab_Latn', 'Jingpho': 'kac_Latn', 'Kamba': 'kam_Latn', 'Kannada': 'kan_Knda', 'Kashmiri Arabic': 'kas_Arab', 'Kashmiri Devanagari': 'kas_Deva', 'Georgian': 'kat_Geor', 'Central Kanuri Arabic': 'knc_Arab', 'Central Kanuri Latin': 'knc_Latn', 'Kazakh': 'kaz_Cyrl', 'Kabiyè': 'kbp_Latn', 'Kabuverdianu': 'kea_Latn', 'Khmer': 'khm_Khmr', 'Kikuyu': 'kik_Latn', 'Kinyarwanda': 'kin_Latn', 'Kyrgyz': 'kir_Cyrl', 'Kimbundu': 'kmb_Latn', 'Northern Kurdish': 'kmr_Latn', 'Kikongo': 'kon_Latn', 'Korean': 'kor_Hang', 'Lao': 'lao_Laoo', 'Ligurian': 'lij_Latn', 'Limburgish': 'lim_Latn', 'Lingala': 'lin_Latn', 'Lithuanian': 'lit_Latn', 'Lombard': 'lmo_Latn', 'Latgalian': 'ltg_Latn', 'Luxembourgish': 'ltz_Latn', 'Luba-Kasai': 'lua_Latn', 'Ganda': 'lug_Latn', 'Luo': 
'luo_Latn', 'Mizo': 'lus_Latn', 'Standard Latvian': 'lvs_Latn', 'Magahi': 'mag_Deva', 'Maithili': 'mai_Deva', 'Malayalam': 'mal_Mlym', 'Marathi': 'mar_Deva', 'Minangkabau Arabic ': 'min_Arab', 'Minangkabau Latin': 'min_Latn', 'Macedonian': 'mkd_Cyrl', 'Plateau Malagasy': 'plt_Latn', 'Maltese': 'mlt_Latn', 'Meitei Bengali': 'mni_Beng', 'Halh Mongolian': 'khk_Cyrl', 'Mossi': 'mos_Latn', 'Maori': 'mri_Latn', 'Burmese': 'mya_Mymr', 'Dutch': 'nld_Latn', 'Norwegian Nynorsk': 'nno_Latn', 'Norwegian Bokmål': 'nob_Latn', 'Nepali': 'npi_Deva', 'Northern Sotho': 'nso_Latn', 'Nuer': 'nus_Latn', 'Nyanja': 'nya_Latn', 'Occitan': 'oci_Latn', 'West Central Oromo': 'gaz_Latn', 'Odia': 'ory_Orya', 'Pangasinan': 'pag_Latn', 'Eastern Panjabi': 'pan_Guru', 'Papiamento': 'pap_Latn', 'Western Persian': 'pes_Arab', 'Polish': 'pol_Latn', 'Portuguese': 'por_Latn', 'Dari': 'prs_Arab', 'Southern Pashto': 'pbt_Arab', 'Ayacucho Quechua': 'quy_Latn', 'Romanian': 'ron_Latn', 'Rundi': 'run_Latn', 'Russian': 'rus_Cyrl', 'Sango': 'sag_Latn', 'Sanskrit': 'san_Deva', 'Santali': 'sat_Olck', 'Sicilian': 'scn_Latn', 'Shan': 'shn_Mymr', 'Sinhala': 'sin_Sinh', 'Slovak': 'slk_Latn', 'Slovenian': 'slv_Latn', 'Samoan': 'smo_Latn', 'Shona': 'sna_Latn', 'Sindhi': 'snd_Arab', 'Somali': 'som_Latn', 'Southern Sotho': 'sot_Latn', 'Spanish': 'spa_Latn', 'Tosk Albanian': 'als_Latn', 'Sardinian': 'srd_Latn', 'Serbian': 'srp_Cyrl', 'Swati': 'ssw_Latn', 'Sundanese': 'sun_Latn', 'Swedish': 'swe_Latn', 'Swahili': 'swh_Latn', 'Silesian': 'szl_Latn', 'Tamil': 'tam_Taml', 'Tatar': 'tat_Cyrl', 'Telugu': 'tel_Telu', 'Tajik': 'tgk_Cyrl', 'Tagalog': 'tgl_Latn', 'Thai': 'tha_Thai', 'Tigrinya': 'tir_Ethi', 'Tamasheq Latin': 'taq_Latn', 'Tamasheq Tifinagh': 'taq_Tfng', 'Tok Pisin': 'tpi_Latn', 'Tswana': 'tsn_Latn', 'Tsonga': 'tso_Latn', 'Turkmen': 'tuk_Latn', 'Tumbuka': 'tum_Latn', 'Turkish': 'tur_Latn', 'Twi': 'twi_Latn', 'Central Atlas Tamazight': 'tzm_Tfng', 'Uyghur': 'uig_Arab', 'Ukrainian': 'ukr_Cyrl', 'Umbundu': 'umb_Latn', 'Urdu': 'urd_Arab', 'Northern Uzbek': 'uzn_Latn', 'Venetian': 'vec_Latn', 'Vietnamese': 'vie_Latn', 'Waray': 'war_Latn', 'Wolof': 'wol_Latn', 'Xhosa': 'xho_Latn', 'Eastern Yiddish': 'ydd_Hebr', 'Yoruba': 'yor_Latn', 'Yue Chinese': 'yue_Hant', 'Chinese Simplified': 'zho_Hans', 'Chinese Traditional': 'zho_Hant', 'Standard Malay': 'zsm_Latn', 'Zulu': 'zul_Latn', } class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : str = '''facebook/nllb-200-distilled-600M''' snake_case__ : Union[str, Any] = ( '''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should ''' '''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, ''' '''which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in ''' '''plain English, such as \'Romanian\', or \'Albanian\'. 
It returns the text translated in `tgt_lang`.''' ) snake_case__ : Optional[Any] = '''translator''' snake_case__ : Tuple = AutoTokenizer snake_case__ : Union[str, Any] = AutoModelForSeqaSeqLM snake_case__ : Dict = LANGUAGE_CODES snake_case__ : str = ['''text''', '''text''', '''text'''] snake_case__ : Tuple = ['''text'''] def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Tuple: if src_lang not in self.lang_to_code: raise ValueError(F"""{src_lang} is not a supported language.""" ) if tgt_lang not in self.lang_to_code: raise ValueError(F"""{tgt_lang} is not a supported language.""" ) a_ : str = self.lang_to_code[src_lang] a_ : Any = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( SCREAMING_SNAKE_CASE__ , return_tensors='pt' , src_lang=SCREAMING_SNAKE_CASE__ , tgt_lang=SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : Tuple ) -> Any: return self.model.generate(**SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[str] ) -> Dict: return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
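The tool is a thin wrapper around facebook/nllb-200-distilled-600M: human-readable language names are mapped to FLORES-200 codes via LANGUAGE_CODES, the tokenizer builds inputs with the source-language token set, and generation starts from the target-language token. A hedged sketch of the same flow with the plain transformers API (the sentence and language pair are illustrative; the checkpoint is downloaded on first use):

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

checkpoint = "facebook/nllb-200-distilled-600M"
tokenizer = AutoTokenizer.from_pretrained(checkpoint, src_lang="eng_Latn")  # 'English'
model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)

inputs = tokenizer("The weather is nice today.", return_tensors="pt")
generated = model.generate(
    **inputs,
    # force the decoder to start with the target-language token ('French')
    forced_bos_token_id=tokenizer.convert_tokens_to_ids("fra_Latn"),
    max_length=64,
)
print(tokenizer.decode(generated[0], skip_special_tokens=True))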
import flax.linen as nn import jax import jax.numpy as jnp class SCREAMING_SNAKE_CASE__ ( nn.Module ): snake_case__ : int snake_case__ : jnp.dtype = jnp.floataa def SCREAMING_SNAKE_CASE ( self : str ) -> int: a_ : Dict = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[int]: a_ , a_ , a_ , a_ : Union[str, Any] = hidden_states.shape a_ : List[str] = jax.image.resize( SCREAMING_SNAKE_CASE__ , shape=(batch, height * 2, width * 2, channels) , method='nearest' , ) a_ : Any = self.conv(SCREAMING_SNAKE_CASE__ ) return hidden_states class SCREAMING_SNAKE_CASE__ ( nn.Module ): snake_case__ : int snake_case__ : jnp.dtype = jnp.floataa def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: a_ : Optional[int] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self : Any , SCREAMING_SNAKE_CASE__ : int ) -> Optional[int]: # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim # hidden_states = jnp.pad(hidden_states, pad_width=pad) a_ : str = self.conv(SCREAMING_SNAKE_CASE__ ) return hidden_states class SCREAMING_SNAKE_CASE__ ( nn.Module ): snake_case__ : int snake_case__ : int = None snake_case__ : float = 0.0 snake_case__ : bool = None snake_case__ : jnp.dtype = jnp.floataa def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: a_ : List[str] = self.in_channels if self.out_channels is None else self.out_channels a_ : Optional[int] = nn.GroupNorm(num_groups=3_2 , epsilon=1E-5 ) a_ : Any = nn.Conv( SCREAMING_SNAKE_CASE__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) a_ : Optional[int] = nn.Dense(SCREAMING_SNAKE_CASE__ , dtype=self.dtype ) a_ : Union[str, Any] = nn.GroupNorm(num_groups=3_2 , epsilon=1E-5 ) a_ : int = nn.Dropout(self.dropout_prob ) a_ : Optional[Any] = nn.Conv( SCREAMING_SNAKE_CASE__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) a_ : List[str] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut a_ : List[Any] = None if use_nin_shortcut: a_ : Union[str, Any] = nn.Conv( SCREAMING_SNAKE_CASE__ , kernel_size=(1, 1) , strides=(1, 1) , padding='VALID' , dtype=self.dtype , ) def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any]=True ) -> int: a_ : List[Any] = hidden_states a_ : Any = self.norma(SCREAMING_SNAKE_CASE__ ) a_ : Any = nn.swish(SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = self.conva(SCREAMING_SNAKE_CASE__ ) a_ : int = self.time_emb_proj(nn.swish(SCREAMING_SNAKE_CASE__ ) ) a_ : List[str] = jnp.expand_dims(jnp.expand_dims(SCREAMING_SNAKE_CASE__ , 1 ) , 1 ) a_ : Optional[int] = hidden_states + temb a_ : List[str] = self.norma(SCREAMING_SNAKE_CASE__ ) a_ : Tuple = nn.swish(SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = self.dropout(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = self.conva(SCREAMING_SNAKE_CASE__ ) if self.conv_shortcut is not None: a_ : List[str] = self.conv_shortcut(SCREAMING_SNAKE_CASE__ ) return hidden_states + residual
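These are the Flax building blocks of a diffusion UNet: a nearest-neighbour 2x upsample followed by a 3x3 convolution, a stride-2 3x3 convolution for downsampling, and a timestep-conditioned residual block (GroupNorm, swish, conv, add the projected time embedding, repeat, with an optional 1x1 shortcut when the channel count changes). The upsampling step itself is just jax.image.resize; a minimal self-contained sketch with NHWC tensors as used above:

import jax
import jax.numpy as jnp

def nearest_upsample_2x(hidden_states: jnp.ndarray) -> jnp.ndarray:
    # hidden_states has shape (batch, height, width, channels), NHWC as in the modules above
    batch, height, width, channels = hidden_states.shape
    return jax.image.resize(
        hidden_states,
        shape=(batch, height * 2, width * 2, channels),
        method="nearest",
    )

x = jnp.ones((1, 8, 8, 32))
print(nearest_upsample_2x(x).shape)  # (1, 16, 16, 32)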
import unittest from transformers.utils.backbone_utils import ( BackboneMixin, get_aligned_output_features_output_indices, verify_out_features_out_indices, ) class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: a_ : Dict = ['a', 'b', 'c'] # Defaults to last layer if both are None a_ , a_ : List[str] = get_aligned_output_features_output_indices(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , ['c'] ) self.assertEqual(SCREAMING_SNAKE_CASE__ , [2] ) # Out indices set to match out features a_ , a_ : str = get_aligned_output_features_output_indices(['a', 'c'] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , ['a', 'c'] ) self.assertEqual(SCREAMING_SNAKE_CASE__ , [0, 2] ) # Out features set to match out indices a_ , a_ : int = get_aligned_output_features_output_indices(SCREAMING_SNAKE_CASE__ , [0, 2] , SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , ['a', 'c'] ) self.assertEqual(SCREAMING_SNAKE_CASE__ , [0, 2] ) # Out features selected from negative indices a_ , a_ : Dict = get_aligned_output_features_output_indices(SCREAMING_SNAKE_CASE__ , [-3, -1] , SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , ['a', 'c'] ) self.assertEqual(SCREAMING_SNAKE_CASE__ , [-3, -1] ) def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: # Stage names must be set with self.assertRaises(SCREAMING_SNAKE_CASE__ ): verify_out_features_out_indices(['a', 'b'] , (0, 1) , SCREAMING_SNAKE_CASE__ ) # Out features must be a list with self.assertRaises(SCREAMING_SNAKE_CASE__ ): verify_out_features_out_indices(('a', 'b') , (0, 1) , ['a', 'b'] ) # Out features must be a subset of stage names with self.assertRaises(SCREAMING_SNAKE_CASE__ ): verify_out_features_out_indices(['a', 'b'] , (0, 1) , ['a'] ) # Out indices must be a list or tuple with self.assertRaises(SCREAMING_SNAKE_CASE__ ): verify_out_features_out_indices(SCREAMING_SNAKE_CASE__ , 0 , ['a', 'b'] ) # Out indices must be a subset of stage names with self.assertRaises(SCREAMING_SNAKE_CASE__ ): verify_out_features_out_indices(SCREAMING_SNAKE_CASE__ , (0, 1) , ['a'] ) # Out features and out indices must be the same length with self.assertRaises(SCREAMING_SNAKE_CASE__ ): verify_out_features_out_indices(['a', 'b'] , (0,) , ['a', 'b', 'c'] ) # Out features should match out indices with self.assertRaises(SCREAMING_SNAKE_CASE__ ): verify_out_features_out_indices(['a', 'b'] , (0, 2) , ['a', 'b', 'c'] ) # Out features and out indices should be in order with self.assertRaises(SCREAMING_SNAKE_CASE__ ): verify_out_features_out_indices(['b', 'a'] , (0, 1) , ['a', 'b'] ) # Check passes with valid inputs verify_out_features_out_indices(['a', 'b', 'd'] , (0, 1, -1) , ['a', 'b', 'c', 'd'] ) def SCREAMING_SNAKE_CASE ( self : str ) -> str: a_ : Tuple = BackboneMixin() a_ : Optional[int] = ['a', 'b', 'c'] a_ : List[str] = ['a', 'c'] a_ : Optional[Any] = [0, 2] # Check that the output features and indices are set correctly self.assertEqual(backbone.out_features , ['a', 'c'] ) self.assertEqual(backbone.out_indices , [0, 2] ) # Check out features and indices are updated correctly a_ : Dict = ['a', 'b'] self.assertEqual(backbone.out_features , ['a', 'b'] ) self.assertEqual(backbone.out_indices , [0, 1] ) a_ : List[str] = [-3, -1] self.assertEqual(backbone.out_features , ['a', 'c'] ) self.assertEqual(backbone.out_indices , [-3, -1] )
import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. UpperCAmelCase_ : Dict = {'LayoutLMv2Config', 'LayoutLMv3Config'} @is_pipeline_test class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): snake_case__ : List[str] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING snake_case__ : Optional[Any] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: snake_case__ : str = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: snake_case__ : List[Any] = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: a_ : List[Any] = pipeline( task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' ) a_ : int = text_classifier('This is great !' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}] ) a_ : Tuple = text_classifier('This is great !' , top_k=2 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}] ) a_ : List[str] = text_classifier(['This is great !', 'This is bad'] , top_k=2 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [ [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}], [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}], ] , ) a_ : Tuple = text_classifier('This is great !' , top_k=1 ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}] ) # Legacy behavior a_ : Union[str, Any] = text_classifier('This is great !' , return_all_scores=SCREAMING_SNAKE_CASE__ ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}] ) a_ : List[str] = text_classifier('This is great !' , return_all_scores=SCREAMING_SNAKE_CASE__ ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}]] ) a_ : int = text_classifier(['This is great !', 'Something else'] , return_all_scores=SCREAMING_SNAKE_CASE__ ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [ [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}], [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}], ] , ) a_ : str = text_classifier(['This is great !', 'Something else'] , return_all_scores=SCREAMING_SNAKE_CASE__ ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [ {'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_0', 'score': 0.504}, ] , ) @require_torch def SCREAMING_SNAKE_CASE ( self : int ) -> Dict: import torch a_ : List[Any] = pipeline( task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' , device=torch.device('cpu' ) , ) a_ : Any = text_classifier('This is great !' 
) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}] ) @require_tf def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: a_ : List[str] = pipeline( task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='tf' ) a_ : Optional[int] = text_classifier('This is great !' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}] ) @slow @require_torch def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: a_ : List[str] = pipeline('text-classification' ) a_ : Dict = text_classifier('This is great !' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'POSITIVE', 'score': 1.0}] ) a_ : Union[str, Any] = text_classifier('This is bad !' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'NEGATIVE', 'score': 1.0}] ) a_ : Tuple = text_classifier('Birds are a type of animal' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'POSITIVE', 'score': 0.988}] ) @slow @require_tf def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]: a_ : Dict = pipeline('text-classification' , framework='tf' ) a_ : Optional[Any] = text_classifier('This is great !' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'POSITIVE', 'score': 1.0}] ) a_ : int = text_classifier('This is bad !' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'NEGATIVE', 'score': 1.0}] ) a_ : Optional[int] = text_classifier('Birds are a type of animal' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'POSITIVE', 'score': 0.988}] ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any: a_ : Optional[Any] = TextClassificationPipeline(model=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ ) return text_classifier, ["HuggingFace is in", "This is another test"] def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Union[str, Any]: a_ : List[str] = text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 a_ : Union[str, Any] = 'HuggingFace is in' a_ : int = text_classifier(SCREAMING_SNAKE_CASE__ ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}] ) self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() ) a_ : Union[str, Any] = ['HuggingFace is in ', 'Paris is in France'] a_ : int = text_classifier(SCREAMING_SNAKE_CASE__ ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}, {'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}] , ) self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() ) self.assertTrue(outputs[1]['label'] in model.config.idalabel.values() ) # Forcing to get all results with `top_k=None` # This is NOT the legacy format a_ : List[Any] = text_classifier(SCREAMING_SNAKE_CASE__ , top_k=SCREAMING_SNAKE_CASE__ ) a_ : Dict = len(model.config.idalabel.values() ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [[{'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}] * N, [{'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}] * N] , ) a_ : int = {'text': 
'HuggingFace is in ', 'text_pair': 'Paris is in France'} a_ : Optional[int] = text_classifier(SCREAMING_SNAKE_CASE__ ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , {'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )} , ) self.assertTrue(outputs['label'] in model.config.idalabel.values() ) # This might be used a text pair, but tokenizer + pipe interaction # makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this usage instead as it was outputting wrong outputs. a_ : Any = [['HuggingFace is in ', 'Paris is in France']] with self.assertRaises(SCREAMING_SNAKE_CASE__ ): text_classifier(SCREAMING_SNAKE_CASE__ ) # This used to be valid for doing text pairs # We're keeping it working because of backward compatibility a_ : Tuple = text_classifier([[['HuggingFace is in ', 'Paris is in France']]] ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}] , ) self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
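The user-facing surface being tested here is small: by default the pipeline returns the single best label, top_k=k returns the k best, top_k=None returns every label, and return_all_scores is the older spelling of the same switch. A minimal usage sketch against the same tiny test checkpoint (scores from a randomly initialised model are meaningless; the output shapes are the point):

from transformers import pipeline

classifier = pipeline("text-classification", model="hf-internal-testing/tiny-random-distilbert")

print(classifier("This is great !"))                               # [{'label': ..., 'score': ...}]
print(classifier("This is great !", top_k=2))                      # the two best labels
print(classifier(["This is great !", "This is bad"], top_k=None))  # all labels, one list per input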
import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: a_ : Optional[int] = 1 a_ : Union[str, Any] = 3 a_ : Union[str, Any] = (3_2, 3_2) a_ : str = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) return image @property def SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: torch.manual_seed(0 ) a_ : List[Any] = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , ) return model @property def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]: torch.manual_seed(0 ) a_ : int = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) return model @property def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple: torch.manual_seed(0 ) a_ : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) return CLIPTextModel(SCREAMING_SNAKE_CASE__ ) @property def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: def extract(*SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Optional[int] ): class SCREAMING_SNAKE_CASE__ : def __init__( self : Union[str, Any] ) -> List[Any]: a_ : str = torch.ones([0] ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple ) -> str: self.pixel_values.to(SCREAMING_SNAKE_CASE__ ) return self return Out() return extract def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: a_ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator a_ : Optional[Any] = self.dummy_cond_unet a_ : List[str] = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=SCREAMING_SNAKE_CASE__ , set_alpha_to_one=SCREAMING_SNAKE_CASE__ , ) a_ : Optional[Any] = self.dummy_vae a_ : int = self.dummy_text_encoder a_ : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) # make sure here that pndm scheduler skips prk a_ : List[Any] = StableDiffusionPipeline( unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , vae=SCREAMING_SNAKE_CASE__ , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ , safety_checker=SCREAMING_SNAKE_CASE__ , feature_extractor=self.dummy_extractor , ) a_ : List[str] = sd_pipe.to(SCREAMING_SNAKE_CASE__ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = 'A painting of a squirrel eating a burger' a_ : 
str = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 ) a_ : List[Any] = sd_pipe([prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' ) a_ : str = output.images a_ : str = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 ) a_ : Optional[Any] = sd_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=SCREAMING_SNAKE_CASE__ , )[0] a_ : str = image[0, -3:, -3:, -1] a_ : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) a_ : Dict = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: a_ : str = 'cpu' # ensure determinism for the device-dependent torch.Generator a_ : Dict = self.dummy_cond_unet a_ : List[Any] = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE__ ) a_ : Any = self.dummy_vae a_ : Optional[Any] = self.dummy_text_encoder a_ : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) # make sure here that pndm scheduler skips prk a_ : Any = StableDiffusionPipeline( unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , vae=SCREAMING_SNAKE_CASE__ , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ , safety_checker=SCREAMING_SNAKE_CASE__ , feature_extractor=self.dummy_extractor , ) a_ : Dict = sd_pipe.to(SCREAMING_SNAKE_CASE__ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : int = 'A painting of a squirrel eating a burger' a_ : Any = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 ) a_ : Any = sd_pipe([prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' ) a_ : Any = output.images a_ : Optional[int] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 ) a_ : Dict = sd_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=SCREAMING_SNAKE_CASE__ , )[0] a_ : Tuple = image[0, -3:, -3:, -1] a_ : List[str] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) a_ : Any = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: a_ : Optional[Any] = StableDiffusionPipeline.from_pretrained( 'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=SCREAMING_SNAKE_CASE__ ) assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert isinstance(pipe.scheduler , SCREAMING_SNAKE_CASE__ ) assert pipe.safety_checker is None a_ : Union[str, Any] = pipe('example prompt' , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(SCREAMING_SNAKE_CASE__ ) a_ : List[str] = StableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE__ ) # sanity check that the pipeline still works assert pipe.safety_checker is None a_ : str = pipe('example prompt' , num_inference_steps=2 ).images[0] assert image is not None 
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: a_ : Union[str, Any] = self.dummy_cond_unet a_ : Dict = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE__ ) a_ : Any = self.dummy_vae a_ : Tuple = self.dummy_text_encoder a_ : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) # put models in fp16 a_ : Union[str, Any] = unet.half() a_ : Dict = vae.half() a_ : Tuple = bert.half() # make sure here that pndm scheduler skips prk a_ : Tuple = StableDiffusionPipeline( unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , vae=SCREAMING_SNAKE_CASE__ , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ , safety_checker=SCREAMING_SNAKE_CASE__ , feature_extractor=self.dummy_extractor , ) a_ : Union[str, Any] = sd_pipe.to(SCREAMING_SNAKE_CASE__ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Dict = 'A painting of a squirrel eating a burger' a_ : Tuple = sd_pipe([prompt] , num_inference_steps=2 , output_type='np' ).images assert image.shape == (1, 6_4, 6_4, 3) @nightly @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: a_ : Dict = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) a_ : Optional[Any] = sd_pipe.to(SCREAMING_SNAKE_CASE__ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = ( 'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle' ' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with' ' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and' ' children from bahnhof zoo, detailed ' ) a_ : Dict = 4_0_0_3_6_6_0_3_4_6 a_ : Union[str, Any] = 7 # without safety guidance (sld_guidance_scale = 0) a_ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) a_ : str = sd_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=SCREAMING_SNAKE_CASE__ , num_inference_steps=5_0 , output_type='np' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , ) a_ : List[str] = output.images a_ : Union[str, Any] = image[0, -3:, -3:, -1] a_ : Any = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176] assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 # without safety guidance (strong configuration) a_ : Optional[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) a_ : str = sd_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=SCREAMING_SNAKE_CASE__ , num_inference_steps=5_0 , output_type='np' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) a_ : str = output.images a_ : Any = image[0, -3:, -3:, -1] a_ : Optional[Any] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719] assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: a_ : int = 
StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=SCREAMING_SNAKE_CASE__ ) a_ : Tuple = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) a_ : Union[str, Any] = sd_pipe.to(SCREAMING_SNAKE_CASE__ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : int = 'padme amidala taking a bath artwork, safe for work, no nudity' a_ : Any = 2_7_3_4_9_7_1_7_5_5 a_ : str = 7 a_ : Tuple = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) a_ : Dict = sd_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=SCREAMING_SNAKE_CASE__ , num_inference_steps=5_0 , output_type='np' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , ) a_ : Optional[int] = output.images a_ : Any = image[0, -3:, -3:, -1] a_ : List[str] = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297] assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 a_ : Optional[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) a_ : Any = sd_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=SCREAMING_SNAKE_CASE__ , num_inference_steps=5_0 , output_type='np' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) a_ : Optional[Any] = output.images a_ : Any = image[0, -3:, -3:, -1] a_ : Any = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443] assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def SCREAMING_SNAKE_CASE ( self : Any ) -> int: a_ : Dict = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' ) a_ : Optional[int] = sd_pipe.to(SCREAMING_SNAKE_CASE__ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = ( 'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.' ' leyendecker' ) a_ : List[Any] = 1_0_4_4_3_5_5_2_3_4 a_ : int = 1_2 a_ : str = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = sd_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=SCREAMING_SNAKE_CASE__ , num_inference_steps=5_0 , output_type='np' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , ) a_ : Dict = output.images a_ : Optional[int] = image[0, -3:, -3:, -1] a_ : int = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7 a_ : Union[str, Any] = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = sd_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=SCREAMING_SNAKE_CASE__ , num_inference_steps=5_0 , output_type='np' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) a_ : Tuple = output.images a_ : str = image[0, -3:, -3:, -1] a_ : str = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] ) assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
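Outside the test harness the safe pipeline is called like the ordinary text-to-image pipeline, with the sld_* arguments controlling how strongly sampling is steered away from unsafe concepts (sld_guidance_scale=0 disables the safety guidance, as the tests above exercise). A hedged sketch assuming a CUDA device; the checkpoint name is the one used in the tests and the prompt is illustrative:

import torch
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe

pipe = StableDiffusionPipelineSafe.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

image = pipe(
    "portrait photo of an astronaut, detailed, studio lighting",
    num_inference_steps=50,
    sld_guidance_scale=2000,  # strong safety-guidance configuration, as in the tests above
    sld_warmup_steps=7,
).images[0]
image.save("astronaut.png")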
import jax.numpy as jnp from ...utils import logging from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel from .configuration_mta import MTaConfig UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__) UpperCAmelCase_ : str = 'T5Config' def SCREAMING_SNAKE_CASE_ ( __A : jnp.array , __A : int , __A : int ) -> jnp.ndarray: """simple docstring""" a_ : Dict = jnp.zeros_like(__A ) a_ : Dict = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] ) a_ : str = shifted_input_ids.at[:, 0].set(__A ) a_ : int = jnp.where(shifted_input_ids == -1_00 , __A , __A ) return shifted_input_ids class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : str = '''mt5''' snake_case__ : List[Any] = MTaConfig class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : str = '''mt5''' snake_case__ : List[str] = MTaConfig class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Any = '''mt5''' snake_case__ : Union[str, Any] = MTaConfig
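The helper at the top is the standard decoder-input preparation for seq2seq training: shift the labels one position to the right, put decoder_start_token_id in front, and replace the -100 padding used by the loss with the real pad token. A worked example with illustrative token ids:

import jax.numpy as jnp

def shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id):
    # Same logic as the module-level helper above, with readable names.
    shifted = jnp.zeros_like(input_ids)
    shifted = shifted.at[:, 1:].set(input_ids[:, :-1])
    shifted = shifted.at[:, 0].set(decoder_start_token_id)
    return jnp.where(shifted == -100, pad_token_id, shifted)

labels = jnp.array([[5, 42, -100, -100]])
print(shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=0))
# [[ 0  5 42  0]] -- start token in front, labels shifted right, -100 replaced by pad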
import argparse import torch from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def SCREAMING_SNAKE_CASE_ ( __A : Tuple , __A : List[str] , __A : Any ) -> str: """simple docstring""" if openai_config_file == "": a_ : Optional[Any] = OpenAIGPTConfig() else: a_ : int = OpenAIGPTConfig.from_json_file(__A ) a_ : Tuple = OpenAIGPTModel(__A ) # Load weights from numpy load_tf_weights_in_openai_gpt(__A , __A , __A ) # Save pytorch-model a_ : Tuple = pytorch_dump_folder_path + '/' + WEIGHTS_NAME a_ : int = pytorch_dump_folder_path + '/' + CONFIG_NAME print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" ) torch.save(model.state_dict() , __A ) print(F"""Save configuration file to {pytorch_config_dump_path}""" ) with open(__A , 'w' , encoding='utf-8' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": UpperCAmelCase_ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--openai_checkpoint_folder_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--openai_config_file', default='', type=str, help=( 'An optional config json file corresponding to the pre-trained OpenAI model. \n' 'This specifies the model architecture.' ), ) UpperCAmelCase_ : Any = parser.parse_args() convert_openai_checkpoint_to_pytorch( args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path )
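After conversion the dump folder holds config.json (CONFIG_NAME) and pytorch_model.bin (WEIGHTS_NAME), which is exactly the local-checkpoint layout from_pretrained() expects, so the converted model can be reloaded directly. A small sketch of the round trip (the path is a placeholder):

from transformers import OpenAIGPTModel

model = OpenAIGPTModel.from_pretrained("/path/to/pytorch_dump_folder")
print(sum(p.numel() for p in model.parameters()))  # sanity-check that the weights loaded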
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig UpperCAmelCase_ : int = logging.get_logger(__name__) # General docstring UpperCAmelCase_ : Tuple = 'ResNetConfig' # Base docstring UpperCAmelCase_ : List[str] = 'microsoft/resnet-50' UpperCAmelCase_ : Optional[int] = [1, 2048, 7, 7] # Image classification docstring UpperCAmelCase_ : str = 'microsoft/resnet-50' UpperCAmelCase_ : Tuple = 'tiger cat' UpperCAmelCase_ : str = [ 'microsoft/resnet-50', # See all resnet models at https://huggingface.co/models?filter=resnet ] class SCREAMING_SNAKE_CASE__ ( nn.Module ): def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 3 , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : str = "relu" ) -> int: super().__init__() a_ : Optional[Any] = nn.Convad( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , padding=kernel_size // 2 , bias=SCREAMING_SNAKE_CASE__ ) a_ : int = nn.BatchNormad(SCREAMING_SNAKE_CASE__ ) a_ : Any = ACTaFN[activation] if activation is not None else nn.Identity() def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : Tensor ) -> Tensor: a_ : Tuple = self.convolution(SCREAMING_SNAKE_CASE__ ) a_ : Dict = self.normalization(SCREAMING_SNAKE_CASE__ ) a_ : Dict = self.activation(SCREAMING_SNAKE_CASE__ ) return hidden_state class SCREAMING_SNAKE_CASE__ ( nn.Module ): def __init__( self : int , SCREAMING_SNAKE_CASE__ : ResNetConfig ) -> int: super().__init__() a_ : str = ResNetConvLayer( config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act ) a_ : Dict = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 ) a_ : Optional[Any] = config.num_channels def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Tensor ) -> Tensor: a_ : List[str] = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' 
) a_ : Optional[Any] = self.embedder(SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = self.pooler(SCREAMING_SNAKE_CASE__ ) return embedding class SCREAMING_SNAKE_CASE__ ( nn.Module ): def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 2 ) -> Dict: super().__init__() a_ : Any = nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=1 , stride=SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = nn.BatchNormad(SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Tensor ) -> Tensor: a_ : Any = self.convolution(SCREAMING_SNAKE_CASE__ ) a_ : List[str] = self.normalization(SCREAMING_SNAKE_CASE__ ) return hidden_state class SCREAMING_SNAKE_CASE__ ( nn.Module ): def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : str = "relu" ) -> Optional[Any]: super().__init__() a_ : Union[str, Any] = in_channels != out_channels or stride != 1 a_ : Union[str, Any] = ( ResNetShortCut(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ ) if should_apply_shortcut else nn.Identity() ) a_ : int = nn.Sequential( ResNetConvLayer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ ) , ResNetConvLayer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , activation=SCREAMING_SNAKE_CASE__ ) , ) a_ : Any = ACTaFN[activation] def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : Any ) -> Union[str, Any]: a_ : List[str] = hidden_state a_ : Optional[Any] = self.layer(SCREAMING_SNAKE_CASE__ ) a_ : Optional[Any] = self.shortcut(SCREAMING_SNAKE_CASE__ ) hidden_state += residual a_ : int = self.activation(SCREAMING_SNAKE_CASE__ ) return hidden_state class SCREAMING_SNAKE_CASE__ ( nn.Module ): def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : str = "relu" , SCREAMING_SNAKE_CASE__ : int = 4 ) -> Optional[Any]: super().__init__() a_ : Tuple = in_channels != out_channels or stride != 1 a_ : Any = out_channels // reduction a_ : Optional[Any] = ( ResNetShortCut(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ ) if should_apply_shortcut else nn.Identity() ) a_ : int = nn.Sequential( ResNetConvLayer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=1 ) , ResNetConvLayer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ ) , ResNetConvLayer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE__ ) , ) a_ : Optional[int] = ACTaFN[activation] def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : List[str] ) -> Tuple: a_ : Any = hidden_state a_ : Dict = self.layer(SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = self.shortcut(SCREAMING_SNAKE_CASE__ ) hidden_state += residual a_ : Union[str, Any] = self.activation(SCREAMING_SNAKE_CASE__ ) return hidden_state class SCREAMING_SNAKE_CASE__ ( nn.Module ): def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : ResNetConfig , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , ) -> Optional[int]: super().__init__() a_ : List[Any] = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer a_ : Union[str, Any] = nn.Sequential( # downsampling is 
done in the first layer with stride of 2 layer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , activation=config.hidden_act ) , *[layer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , activation=config.hidden_act ) for _ in range(depth - 1 )] , ) def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Tensor ) -> Tensor: a_ : Any = input for layer in self.layers: a_ : Optional[Any] = layer(SCREAMING_SNAKE_CASE__ ) return hidden_state class SCREAMING_SNAKE_CASE__ ( nn.Module ): def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : ResNetConfig ) -> str: super().__init__() a_ : Tuple = nn.ModuleList([] ) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages.append( ResNetStage( SCREAMING_SNAKE_CASE__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) a_ : Union[str, Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(SCREAMING_SNAKE_CASE__ , config.depths[1:] ): self.stages.append(ResNetStage(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , depth=SCREAMING_SNAKE_CASE__ ) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tensor , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = True ) -> BaseModelOutputWithNoAttention: a_ : Union[str, Any] = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: a_ : str = hidden_states + (hidden_state,) a_ : List[Any] = stage_module(SCREAMING_SNAKE_CASE__ ) if output_hidden_states: a_ : Dict = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention( last_hidden_state=SCREAMING_SNAKE_CASE__ , hidden_states=SCREAMING_SNAKE_CASE__ , ) class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : List[Any] = ResNetConfig snake_case__ : Optional[int] = '''resnet''' snake_case__ : int = '''pixel_values''' snake_case__ : str = True def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Any ) -> Optional[int]: if isinstance(SCREAMING_SNAKE_CASE__ , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' ) elif isinstance(SCREAMING_SNAKE_CASE__ , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str=False ) -> Any: if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): a_ : List[Any] = value UpperCAmelCase_ : List[str] = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' UpperCAmelCase_ : Union[str, Any] = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. 
See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( '''The bare ResNet model outputting raw features without any specific head on top.''' , lowercase__ , ) class SCREAMING_SNAKE_CASE__ ( lowercase__ ): def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Tuple ) -> Any: super().__init__(SCREAMING_SNAKE_CASE__ ) a_ : int = config a_ : Optional[int] = ResNetEmbeddings(SCREAMING_SNAKE_CASE__ ) a_ : Any = ResNetEncoder(SCREAMING_SNAKE_CASE__ ) a_ : Dict = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE__ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : Tensor , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention: a_ : Tuple = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) a_ : List[str] = return_dict if return_dict is not None else self.config.use_return_dict a_ : List[str] = self.embedder(SCREAMING_SNAKE_CASE__ ) a_ : str = self.encoder( SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ ) a_ : str = encoder_outputs[0] a_ : List[str] = self.pooler(SCREAMING_SNAKE_CASE__ ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=SCREAMING_SNAKE_CASE__ , pooler_output=SCREAMING_SNAKE_CASE__ , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( ''' ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
''' , lowercase__ , ) class SCREAMING_SNAKE_CASE__ ( lowercase__ ): def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[str]: super().__init__(SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = config.num_labels a_ : List[str] = ResNetModel(SCREAMING_SNAKE_CASE__ ) # classification head a_ : Dict = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE__ : Optional[torch.LongTensor] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention: a_ : str = return_dict if return_dict is not None else self.config.use_return_dict a_ : Dict = self.resnet(SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ ) a_ : str = outputs.pooler_output if return_dict else outputs[1] a_ : Any = self.classifier(SCREAMING_SNAKE_CASE__ ) a_ : Dict = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: a_ : Optional[Any] = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): a_ : Optional[Any] = 'single_label_classification' else: a_ : Optional[int] = 'multi_label_classification' if self.config.problem_type == "regression": a_ : str = MSELoss() if self.num_labels == 1: a_ : Optional[Any] = loss_fct(logits.squeeze() , labels.squeeze() ) else: a_ : List[Any] = loss_fct(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) elif self.config.problem_type == "single_label_classification": a_ : Optional[int] = CrossEntropyLoss() a_ : Any = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": a_ : List[Any] = BCEWithLogitsLoss() a_ : Optional[Any] = loss_fct(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if not return_dict: a_ : Union[str, Any] = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=SCREAMING_SNAKE_CASE__ , logits=SCREAMING_SNAKE_CASE__ , hidden_states=outputs.hidden_states ) @add_start_docstrings( ''' ResNet backbone, to be used with frameworks like DETR and MaskFormer. 
''' , lowercase__ , ) class SCREAMING_SNAKE_CASE__ ( lowercase__ , lowercase__ ): def __init__( self : Any , SCREAMING_SNAKE_CASE__ : Any ) -> Optional[int]: super().__init__(SCREAMING_SNAKE_CASE__ ) super()._init_backbone(SCREAMING_SNAKE_CASE__ ) a_ : Tuple = [config.embedding_size] + config.hidden_sizes a_ : Dict = ResNetEmbeddings(SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = ResNetEncoder(SCREAMING_SNAKE_CASE__ ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__ ) @replace_return_docstrings(output_type=SCREAMING_SNAKE_CASE__ , config_class=_CONFIG_FOR_DOC ) def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : Tensor , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None ) -> BackboneOutput: a_ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict a_ : str = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) a_ : Dict = self.embedder(SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = self.encoder(SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ ) a_ : Optional[Any] = outputs.hidden_states a_ : Dict = () for idx, stage in enumerate(self.stage_names ): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: a_ : Optional[int] = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=SCREAMING_SNAKE_CASE__ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=SCREAMING_SNAKE_CASE__ , )
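# ---------------------------------------------------------------------------
# Usage sketch (not part of the file above). The classes defined above mirror
# the ResNet implementation from the `transformers` library under obfuscated
# names, so this minimal sketch assumes the upstream `ResNetConfig` and
# `ResNetModel` classes from a regular `transformers` install. It only shows a
# random forward pass and the expected output shapes; it is illustrative, not
# the author's own workflow.
import torch
from transformers import ResNetConfig, ResNetModel

config = ResNetConfig()  # defaults roughly correspond to a ResNet-50-sized model
model = ResNetModel(config)
model.eval()

with torch.no_grad():
    outputs = model(pixel_values=torch.randn(1, 3, 224, 224))

print(outputs.last_hidden_state.shape)  # final feature map, e.g. (1, 2048, 7, 7)
print(outputs.pooler_output.shape)      # pooled features, e.g. (1, 2048, 1, 1)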
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Tuple = ['''image_processor''', '''tokenizer'''] snake_case__ : Union[str, Any] = '''CLIPImageProcessor''' snake_case__ : Dict = ('''CLIPTokenizer''', '''CLIPTokenizerFast''') def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : List[str]=None , **SCREAMING_SNAKE_CASE__ : int ) -> Any: a_ : List[Any] = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , SCREAMING_SNAKE_CASE__ , ) a_ : Tuple = kwargs.pop('feature_extractor' ) a_ : Tuple = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __call__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , **SCREAMING_SNAKE_CASE__ : str ) -> Optional[Any]: if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be none.' ) if text is not None: a_ : List[str] = self.tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) if images is not None: a_ : Dict = self.image_processor(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) if text is not None and images is not None: a_ : Dict = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE__ ) , tensor_type=SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Any , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[Any]: return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[Any]: return self.tokenizer.decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) @property def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: a_ : str = self.tokenizer.model_input_names a_ : Tuple = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def SCREAMING_SNAKE_CASE ( self : str ) -> str: warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , SCREAMING_SNAKE_CASE__ , ) return self.image_processor_class @property def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , SCREAMING_SNAKE_CASE__ , ) return self.image_processor
from __future__ import annotations

import math


def SCREAMING_SNAKE_CASE_ ( __A : int , __A : int , __A : bool , __A : list[int] , __A : float ) -> int:
    """simple docstring"""
    if depth < 0:
        raise ValueError('Depth cannot be less than 0' )
    if len(__A ) == 0:
        raise ValueError('Scores cannot be empty' )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1 , node_index * 2 , __A , __A , __A ) ,
            minimax(depth + 1 , node_index * 2 + 1 , __A , __A , __A ) ,
        )
    return min(
        minimax(depth + 1 , node_index * 2 , __A , __A , __A ) ,
        minimax(depth + 1 , node_index * 2 + 1 , __A , __A , __A ) ,
    )


def SCREAMING_SNAKE_CASE_ ( ) -> None:
    """simple docstring"""
    a_ : Dict = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
    a_ : int = math.log(len(__A ) , 2 )
    print('Optimal value : ' , end='' )
    print(minimax(0 , 0 , __A , __A , __A ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
from __future__ import annotations

UpperCAmelCase_ : Tuple = []


def SCREAMING_SNAKE_CASE_ ( __A : list[list[int]] , __A : int , __A : int ) -> bool:
    """simple docstring"""
    for i in range(len(__A ) ):
        if board[row][i] == 1:
            return False
    for i in range(len(__A ) ):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(__A , -1 , -1 ) , range(__A , -1 , -1 ) ):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(__A , -1 , -1 ) , range(__A , len(__A ) ) ):
        if board[i][j] == 1:
            return False
    return True


def SCREAMING_SNAKE_CASE_ ( __A : list[list[int]] , __A : int ) -> bool:
    """simple docstring"""
    if row >= len(__A ):
        solution.append(__A )
        printboard(__A )
        print()
        return True
    for i in range(len(__A ) ):
        if is_safe(__A , __A , __A ):
            a_ : Any = 1
            solve(__A , row + 1 )
            a_ : Tuple = 0
    return False


def SCREAMING_SNAKE_CASE_ ( __A : list[list[int]] ) -> None:
    """simple docstring"""
    for i in range(len(__A ) ):
        for j in range(len(__A ) ):
            if board[i][j] == 1:
                print('Q' , end=' ' )
            else:
                print('.' , end=' ' )
        print()


# n=int(input("The no. of queens"))
UpperCAmelCase_ : List[str] = 8
UpperCAmelCase_ : str = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase_ : str = logging.get_logger(__name__) UpperCAmelCase_ : Optional[int] = [ ('bert.bert', 'visual_bert'), ('bert.cls', 'cls'), ('bert.classifier', 'cls'), ('token_type_embeddings_visual', 'visual_token_type_embeddings'), ('position_embeddings_visual', 'visual_position_embeddings'), ('projection', 'visual_projection'), ] UpperCAmelCase_ : Optional[int] = [ 'nlvr2_coco_pre_trained.th', 'nlvr2_fine_tuned.th', 'nlvr2_pre_trained.th', 'vcr_coco_pre_train.th', 'vcr_fine_tune.th', 'vcr_pre_train.th', 'vqa_coco_pre_trained.th', 'vqa_fine_tuned.th', 'vqa_pre_trained.th', ] def SCREAMING_SNAKE_CASE_ ( __A : Union[str, Any] ) -> Tuple: """simple docstring""" a_ : str = torch.load(__A , map_location='cpu' ) return sd def SCREAMING_SNAKE_CASE_ ( __A : str , __A : Union[str, Any] , __A : Dict=rename_keys_prefix ) -> List[Any]: """simple docstring""" a_ : str = OrderedDict() a_ : Optional[int] = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue a_ : Optional[int] = key for name_pair in rename_keys_prefix: a_ : Union[str, Any] = new_key.replace(name_pair[0] , name_pair[1] ) a_ : Dict = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately a_ : Optional[Any] = new_d['cls.predictions.bias'] return new_d @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( __A : List[str] , __A : List[str] ) -> int: """simple docstring""" assert ( checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS ), F"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.""" # Get Config if "pre" in checkpoint_path: a_ : Dict = 'pretraining' if "vcr" in checkpoint_path: a_ : Any = {'visual_embedding_dim': 5_12} elif "vqa_advanced" in checkpoint_path: a_ : Optional[int] = {'visual_embedding_dim': 20_48} elif "vqa" in checkpoint_path: a_ : str = {'visual_embedding_dim': 20_48} elif "nlvr" in checkpoint_path: a_ : List[Any] = {'visual_embedding_dim': 10_24} else: raise NotImplementedError(F"""No implementation found for `{checkpoint_path}`.""" ) else: if "vcr" in checkpoint_path: a_ : Dict = {'visual_embedding_dim': 5_12} a_ : Union[str, Any] = 'multichoice' elif "vqa_advanced" in checkpoint_path: a_ : Union[str, Any] = {'visual_embedding_dim': 20_48} a_ : int = 'vqa_advanced' elif "vqa" in checkpoint_path: a_ : int = {'visual_embedding_dim': 20_48, 'num_labels': 31_29} a_ : Tuple = 'vqa' elif "nlvr" in checkpoint_path: a_ : Dict = { 'visual_embedding_dim': 10_24, 'num_labels': 2, } a_ : int = 'nlvr' a_ : List[str] = VisualBertConfig(**__A ) # Load State Dict a_ : List[Any] = load_state_dict(__A ) a_ : str = get_new_dict(__A , __A ) if model_type == "pretraining": a_ : int = VisualBertForPreTraining(__A ) elif model_type == "vqa": a_ : str = VisualBertForQuestionAnswering(__A ) elif model_type == "nlvr": a_ : Optional[Any] = VisualBertForVisualReasoning(__A ) elif model_type == "multichoice": a_ : Tuple = VisualBertForMultipleChoice(__A ) model.load_state_dict(__A ) # Save Checkpoints Path(__A ).mkdir(exist_ok=__A ) model.save_pretrained(__A ) if __name__ == "__main__": UpperCAmelCase_ : List[str] = 
argparse.ArgumentParser() # Required parameters parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.') parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.') UpperCAmelCase_ : List[Any] = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def SCREAMING_SNAKE_CASE_ ( ) -> Any:
    """simple docstring"""
    a_ : Optional[Any] = HfArgumentParser(__A )
    a_ : Optional[int] = parser.parse_args_into_dataclasses()[0]
    a_ : List[Any] = TensorFlowBenchmark(args=__A )
    try:
        a_ : List[str] = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        a_ : Dict = 'Arg --no_{0} is no longer used, please use --no-{0} instead.'
        a_ : Dict = ' '.join(str(__A ).split(' ' )[:-1] )
        a_ : int = ''
        a_ : int = eval(str(__A ).split(' ' )[-1] )
        a_ : Any = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(__A )
        if len(__A ) > 0:
            a_ : str = full_error_msg + begin_error_msg + str(__A )
        raise ValueError(__A )
    benchmark.run()


if __name__ == "__main__":
    main()
import collections import importlib.util import os import re from pathlib import Path UpperCAmelCase_ : int = 'src/transformers' # Matches is_xxx_available() UpperCAmelCase_ : Any = re.compile(R'is\_([a-z_]*)_available()') # Catches a one-line _import_struct = {xxx} UpperCAmelCase_ : Any = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] UpperCAmelCase_ : Dict = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]') # Catches a line if not is_foo_available UpperCAmelCase_ : int = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)') # Catches a line _import_struct["bla"].append("foo") UpperCAmelCase_ : Optional[Any] = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] UpperCAmelCase_ : List[Any] = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]') # Catches a line with an object between quotes and a comma: "MyModel", UpperCAmelCase_ : int = re.compile('^\s+"([^"]+)",') # Catches a line with objects between brackets only: ["foo", "bar"], UpperCAmelCase_ : Optional[Any] = re.compile('^\s+\[([^\]]+)\]') # Catches a line with from foo import bar, bla, boo UpperCAmelCase_ : Any = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') # Catches a line with try: UpperCAmelCase_ : Tuple = re.compile(R'^\s*try:') # Catches a line with else: UpperCAmelCase_ : Any = re.compile(R'^\s*else:') def SCREAMING_SNAKE_CASE_ ( __A : str ) -> List[str]: """simple docstring""" if _re_test_backend.search(__A ) is None: return None a_ : Dict = [b[0] for b in _re_backend.findall(__A )] backends.sort() return "_and_".join(__A ) def SCREAMING_SNAKE_CASE_ ( __A : Optional[Any] ) -> str: """simple docstring""" with open(__A , 'r' , encoding='utf-8' , newline='\n' ) as f: a_ : Optional[Any] = f.readlines() a_ : Optional[int] = 0 while line_index < len(__A ) and not lines[line_index].startswith('_import_structure = {' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(__A ): return None # First grab the objects without a specific backend in _import_structure a_ : Optional[Any] = [] while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None: a_ : Optional[int] = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(__A ): a_ : Union[str, Any] = _re_one_line_import_struct.search(__A ).groups()[0] a_ : int = re.findall('\[([^\]]+)\]' , __A ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(', ' )] ) line_index += 1 continue a_ : Any = _re_import_struct_key_value.search(__A ) if single_line_import_search is not None: a_ : Union[str, Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(__A ) > 0] objects.extend(__A ) elif line.startswith(' ' * 8 + '"' ): objects.append(line[9:-3] ) line_index += 1 a_ : Dict = {'none': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('if TYPE_CHECKING' ): # If the line is an if not is_backend_available, we grab all objects associated. 
a_ : str = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: a_ : Dict = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 a_ : Tuple = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ): a_ : Optional[Any] = lines[line_index] if _re_import_struct_add_one.search(__A ) is not None: objects.append(_re_import_struct_add_one.search(__A ).groups()[0] ) elif _re_import_struct_add_many.search(__A ) is not None: a_ : Tuple = _re_import_struct_add_many.search(__A ).groups()[0].split(', ' ) a_ : Tuple = [obj[1:-1] for obj in imports if len(__A ) > 0] objects.extend(__A ) elif _re_between_brackets.search(__A ) is not None: a_ : Tuple = _re_between_brackets.search(__A ).groups()[0].split(', ' ) a_ : Tuple = [obj[1:-1] for obj in imports if len(__A ) > 0] objects.extend(__A ) elif _re_quote_object.search(__A ) is not None: objects.append(_re_quote_object.search(__A ).groups()[0] ) elif line.startswith(' ' * 8 + '"' ): objects.append(line[9:-3] ) elif line.startswith(' ' * 12 + '"' ): objects.append(line[13:-3] ) line_index += 1 a_ : str = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend a_ : str = [] while ( line_index < len(__A ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('else' ) ): a_ : Optional[Any] = lines[line_index] a_ : Any = _re_import.search(__A ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(', ' ) ) elif line.startswith(' ' * 8 ): objects.append(line[8:-2] ) line_index += 1 a_ : Any = {'none': objects} # Let's continue with backend-specific objects while line_index < len(__A ): # If the line is an if is_backend_available, we grab all objects associated. 
a_ : str = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: a_ : List[Any] = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 a_ : Optional[Any] = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ): a_ : Optional[Any] = lines[line_index] a_ : List[Any] = _re_import.search(__A ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(', ' ) ) elif line.startswith(' ' * 12 ): objects.append(line[12:-2] ) line_index += 1 a_ : int = objects else: line_index += 1 return import_dict_objects, type_hint_objects def SCREAMING_SNAKE_CASE_ ( __A : List[str] , __A : Any ) -> int: """simple docstring""" def find_duplicates(__A : List[Any] ): return [k for k, v in collections.Counter(__A ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] a_ : Tuple = [] for key in import_dict_objects.keys(): a_ : Dict = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" ) a_ : List[str] = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): a_ : Optional[Any] = 'base imports' if key == 'none' else F"""{key} backend""" errors.append(F"""Differences for {name}:""" ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" ) return errors def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]: """simple docstring""" a_ : Dict = [] for root, _, files in os.walk(__A ): if "__init__.py" in files: a_ : List[Any] = os.path.join(__A , '__init__.py' ) a_ : Optional[Any] = parse_init(__A ) if objects is not None: a_ : List[Any] = analyze_results(*__A ) if len(__A ) > 0: a_ : Any = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}""" failures.append('\n'.join(__A ) ) if len(__A ) > 0: raise ValueError('\n\n'.join(__A ) ) def SCREAMING_SNAKE_CASE_ ( ) -> str: """simple docstring""" a_ : str = [] for path, directories, files in os.walk(__A ): for folder in directories: # Ignore private modules if folder.startswith('_' ): directories.remove(__A ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(__A ) / folder).glob('*.py' ) ) ) == 0: continue a_ : Optional[Any] = str((Path(__A ) / folder).relative_to(__A ) ) a_ : List[Any] = short_path.replace(os.path.sep , '.' ) submodules.append(__A ) for fname in files: if fname == "__init__.py": continue a_ : int = str((Path(__A ) / fname).relative_to(__A ) ) a_ : Optional[Any] = short_path.replace('.py' , '' ).replace(os.path.sep , '.' ) if len(submodule.split('.' 
) ) == 1: submodules.append(__A ) return submodules UpperCAmelCase_ : Dict = [ 'convert_pytorch_checkpoint_to_tf2', 'modeling_flax_pytorch_utils', ] def SCREAMING_SNAKE_CASE_ ( ) -> List[str]: """simple docstring""" a_ : int = importlib.util.spec_from_file_location( 'transformers' , os.path.join(__A , '__init__.py' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , ) a_ : Dict = spec.loader.load_module() a_ : List[str] = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(__A ) > 0: a_ : Union[str, Any] = '\n'.join(F"""- {module}""" for module in module_not_registered ) raise ValueError( 'The following submodules are not properly registered in the main init of Transformers:\n' F"""{list_of_modules}\n""" 'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' ) if __name__ == "__main__": check_all_inits() check_submodules()
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class SCREAMING_SNAKE_CASE__ ( lowercase__ , unittest.TestCase ): snake_case__ : Optional[Any] = TextToVideoSDPipeline snake_case__ : Optional[int] = TEXT_TO_IMAGE_PARAMS snake_case__ : str = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. snake_case__ : Optional[Any] = frozenset( [ '''num_inference_steps''', '''generator''', '''latents''', '''return_dict''', '''callback''', '''callback_steps''', ] ) def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: torch.manual_seed(0 ) a_ : Optional[int] = UNetaDConditionModel( block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=3_2 , attention_head_dim=4 , ) a_ : int = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=SCREAMING_SNAKE_CASE__ , set_alpha_to_one=SCREAMING_SNAKE_CASE__ , ) torch.manual_seed(0 ) a_ : int = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) a_ : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , ) a_ : Dict = CLIPTextModel(SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) a_ : Union[str, Any] = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, } return components def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any]=0 ) -> List[str]: if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ): a_ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) else: a_ : Optional[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) a_ : int = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'pt', } return inputs def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: a_ : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator a_ : Dict = self.get_dummy_components() a_ : str = TextToVideoSDPipeline(**SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = sd_pipe.to(SCREAMING_SNAKE_CASE__ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Tuple = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) a_ : Dict = 'np' a_ : Dict = sd_pipe(**SCREAMING_SNAKE_CASE__ ).frames a_ : int = 
frames[0][-3:, -3:, -1] assert frames[0].shape == (6_4, 6_4, 3) a_ : Union[str, Any] = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: self._test_attention_slicing_forward_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE__ , expected_max_diff=3E-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def SCREAMING_SNAKE_CASE ( self : Any ) -> str: self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE__ , expected_max_diff=1E-2 ) @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: pass @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]: pass @unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' ) def SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]: pass def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: return super().test_progress_bar() @slow @skip_mps class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: a_ : str = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy' ) a_ : Any = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' ) a_ : Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) a_ : Optional[Any] = pipe.to('cuda' ) a_ : Any = 'Spiderman is surfing' a_ : List[Any] = torch.Generator(device='cpu' ).manual_seed(0 ) a_ : Optional[Any] = pipe(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2_5 , output_type='pt' ).frames a_ : str = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5E-2 def SCREAMING_SNAKE_CASE ( self : Any ) -> Any: a_ : Dict = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy' ) a_ : Tuple = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' ) a_ : Tuple = pipe.to('cuda' ) a_ : Any = 'Spiderman is surfing' a_ : List[str] = torch.Generator(device='cpu' ).manual_seed(0 ) a_ : List[Any] = pipe(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type='pt' ).frames a_ : List[str] = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5E-2
import json
import sys


def SCREAMING_SNAKE_CASE_ ( __A : Tuple , __A : List[Any] ) -> int:
    """simple docstring"""
    with open(__A , encoding='utf-8' ) as f:
        a_ : Optional[int] = json.load(__A )

    a_ : List[Any] = ['<details>', '<summary>Show updated benchmarks!</summary>', ' ']
    for benchmark_name in sorted(__A ):
        a_ : Dict = results[benchmark_name]
        a_ : Union[str, Any] = benchmark_name.split('/' )[-1]
        output_md.append(F"""### Benchmark: {benchmark_file_name}""" )
        a_ : Any = '| metric |'
        a_ : int = '|--------|'
        a_ : Optional[int] = '| new / old (diff) |'
        for metric_name in sorted(__A ):
            a_ : int = benchmark_res[metric_name]
            a_ : List[str] = metric_vals['new']
            a_ : Tuple = metric_vals.get('old' , __A )
            a_ : Union[str, Any] = metric_vals.get('diff' , __A )
            a_ : int = F""" {new_val:f}""" if isinstance(__A , (int, float) ) else 'None'
            if old_val is not None:
                val_str += F""" / {old_val:f}""" if isinstance(__A , (int, float) ) else "None"
            if dif_val is not None:
                val_str += F""" ({dif_val:f})""" if isinstance(__A , (int, float) ) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append('</details>' )

    with open(__A , 'w' , encoding='utf-8' ) as f:
        f.writelines('\n'.join(__A ) )


if __name__ == "__main__":
    UpperCAmelCase_ : Optional[Any] = sys.argv[1]
    UpperCAmelCase_ : List[str] = sys.argv[2]

    format_json_to_md(input_json_file, output_md_file)
import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class SCREAMING_SNAKE_CASE__ ( lowercase__ , unittest.TestCase ): # TODO: is there an appropriate internal test set? snake_case__ : Any = '''ssube/stable-diffusion-x4-upscaler-onnx''' def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : int=0 ) -> Tuple: a_ : Union[str, Any] = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ) a_ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict: a_ : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = self.get_dummy_inputs() a_ : int = pipe(**SCREAMING_SNAKE_CASE__ ).images a_ : Tuple = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 5_1_2, 5_1_2, 3) a_ : List[Any] = np.array( [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: a_ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) a_ : int = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : List[str] = self.get_dummy_inputs() a_ : List[str] = pipe(**SCREAMING_SNAKE_CASE__ ).images a_ : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) a_ : str = np.array( [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: a_ : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) a_ : Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = self.get_dummy_inputs() a_ : Dict = pipe(**SCREAMING_SNAKE_CASE__ ).images a_ : str = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) a_ : Optional[Any] = np.array( [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: a_ : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) a_ : int = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : 
Optional[Any] = self.get_dummy_inputs() a_ : Dict = pipe(**SCREAMING_SNAKE_CASE__ ).images a_ : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) a_ : int = np.array( [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: a_ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) a_ : Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = self.get_dummy_inputs() a_ : List[str] = pipe(**SCREAMING_SNAKE_CASE__ ).images a_ : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) a_ : Union[str, Any] = np.array( [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @property def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: a_ : List[str] = ort.SessionOptions() a_ : int = False return options def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple: a_ : str = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) a_ : int = init_image.resize((1_2_8, 1_2_8) ) # using the PNDM scheduler by default a_ : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained( 'ssube/stable-diffusion-x4-upscaler-onnx' , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Tuple = 'A fantasy landscape, trending on artstation' a_ : str = torch.manual_seed(0 ) a_ : List[str] = pipe( prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , num_inference_steps=1_0 , generator=SCREAMING_SNAKE_CASE__ , output_type='np' , ) a_ : Dict = output.images a_ : Any = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) a_ : str = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]: a_ : Dict = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) a_ : List[str] = init_image.resize((1_2_8, 1_2_8) ) a_ : Dict = LMSDiscreteScheduler.from_pretrained( 'ssube/stable-diffusion-x4-upscaler-onnx' , subfolder='scheduler' ) a_ : Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained( 'ssube/stable-diffusion-x4-upscaler-onnx' , scheduler=SCREAMING_SNAKE_CASE__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Any = 'A fantasy landscape, trending on artstation' a_ : Tuple = torch.manual_seed(0 ) a_ : Optional[Any] = pipe( prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , 
guidance_scale=7.5 , num_inference_steps=2_0 , generator=SCREAMING_SNAKE_CASE__ , output_type='np' , ) a_ : str = output.images a_ : List[Any] = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) a_ : Tuple = np.array( [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
import math
import unittest


def SCREAMING_SNAKE_CASE_ ( __A : int ) -> bool:
    """simple docstring"""
    assert isinstance(__A , __A ) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(__A ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
        self.assertTrue(is_prime(2 ) )
        self.assertTrue(is_prime(3 ) )
        self.assertTrue(is_prime(5 ) )
        self.assertTrue(is_prime(7 ) )
        self.assertTrue(is_prime(1_1 ) )
        self.assertTrue(is_prime(1_3 ) )
        self.assertTrue(is_prime(1_7 ) )
        self.assertTrue(is_prime(1_9 ) )
        self.assertTrue(is_prime(2_3 ) )
        self.assertTrue(is_prime(2_9 ) )

    def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
        with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
            is_prime(-1_9 )
        self.assertFalse(
            is_prime(0 ) ,
            'Zero doesn\'t have any positive factors, primes must have exactly two.' ,
        )
        self.assertFalse(
            is_prime(1 ) ,
            'One only has 1 positive factor, primes must have exactly two.' ,
        )
        self.assertFalse(is_prime(2 * 2 ) )
        self.assertFalse(is_prime(2 * 3 ) )
        self.assertFalse(is_prime(3 * 3 ) )
        self.assertFalse(is_prime(3 * 5 ) )
        self.assertFalse(is_prime(3 * 5 * 7 ) )


if __name__ == "__main__":
    unittest.main()
import inspect import re from hashlib import shaaaa from typing import Dict, List from .arrow import arrow from .audiofolder import audiofolder from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .sql import sql # noqa F401 from .text import text def SCREAMING_SNAKE_CASE_ ( __A : List[str] ) -> str: """simple docstring""" a_ : Tuple = [] for line in lines: a_ : Any = re.sub(R'#.*' , '' , __A ) # remove comments if line: filtered_lines.append(__A ) a_ : Tuple = '\n'.join(__A ) # Make a hash from all this code a_ : Tuple = full_str.encode('utf-8' ) return shaaaa(__A ).hexdigest() # get importable module names and hash for caching UpperCAmelCase_ : List[Any] = { 'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), 'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), 'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), 'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), 'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), 'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), 'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), 'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), } # Used to infer the module to use based on the data files extensions UpperCAmelCase_ : Dict = { '.csv': ('csv', {}), '.tsv': ('csv', {'sep': '\t'}), '.json': ('json', {}), '.jsonl': ('json', {}), '.parquet': ('parquet', {}), '.arrow': ('arrow', {}), '.txt': ('text', {}), } _EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) UpperCAmelCase_ : Optional[int] = {'imagefolder', 'audiofolder'} # Used to filter data files based on extensions given a module name UpperCAmelCase_ : Dict[str, List[str]] = {} for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) _MODULE_TO_EXTENSIONS["imagefolder"].append('.zip') _MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase_ : Any = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE_ ( __A : Union[str, Any] ) -> str: """simple docstring""" a_ : Optional[Any] = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: a_ : Dict = 1_28 elif "12-12" in model_name: a_ : int = 12 a_ : Tuple = 12 elif "14-14" in model_name: a_ : str = 14 a_ : List[Any] = 14 elif "16-16" in model_name: a_ : Optional[Any] = 16 a_ : str = 16 else: raise ValueError('Model not supported' ) a_ : Dict = 'huggingface/label-files' if "speech-commands" in model_name: a_ : Optional[Any] = 35 a_ : Optional[Any] = 'speech-commands-v2-id2label.json' else: a_ : Any = 5_27 a_ : int = 'audioset-id2label.json' a_ : List[str] = json.load(open(hf_hub_download(__A , __A , repo_type='dataset' ) , 'r' ) ) a_ : str = {int(__A ): v for k, v in idalabel.items()} a_ : Union[str, Any] = idalabel a_ : Optional[Any] = {v: k for k, v in idalabel.items()} return config def SCREAMING_SNAKE_CASE_ ( __A : Union[str, Any] ) -> Optional[int]: """simple docstring""" if "module.v" in name: a_ : Union[str, Any] = name.replace('module.v' , 'audio_spectrogram_transformer' ) if "cls_token" in name: a_ : Any = name.replace('cls_token' , 'embeddings.cls_token' ) if "dist_token" in name: a_ : Dict = name.replace('dist_token' , 'embeddings.distillation_token' ) if "pos_embed" in name: a_ : Dict = name.replace('pos_embed' , 'embeddings.position_embeddings' ) if "patch_embed.proj" in name: a_ : str = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) # transformer blocks if "blocks" in name: a_ : Dict = name.replace('blocks' , 'encoder.layer' ) if "attn.proj" in name: a_ : int = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: a_ : Union[str, Any] = name.replace('attn' , 'attention.self' ) if "norm1" in name: a_ : Tuple = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: a_ : Any = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: a_ : Optional[int] = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: a_ : Any = name.replace('mlp.fc2' , 'output.dense' ) # final layernorm if "audio_spectrogram_transformer.norm" in name: a_ : int = name.replace('audio_spectrogram_transformer.norm' , 'audio_spectrogram_transformer.layernorm' ) # classifier head if "module.mlp_head.0" in name: a_ : Any = name.replace('module.mlp_head.0' , 'classifier.layernorm' ) if "module.mlp_head.1" in name: a_ : str = name.replace('module.mlp_head.1' , 'classifier.dense' ) return name def SCREAMING_SNAKE_CASE_ ( __A : List[str] , __A : List[Any] ) -> Optional[int]: """simple docstring""" for key in orig_state_dict.copy().keys(): a_ : Dict = orig_state_dict.pop(__A ) if "qkv" in key: a_ : Optional[Any] = key.split('.' 
) a_ : Optional[int] = int(key_split[3] ) a_ : Dict = config.hidden_size if "weight" in key: a_ : Optional[Any] = val[:dim, :] a_ : Optional[Any] = val[dim : dim * 2, :] a_ : List[Any] = val[-dim:, :] else: a_ : List[str] = val[:dim] a_ : Optional[int] = val[dim : dim * 2] a_ : str = val[-dim:] else: a_ : Dict = val return orig_state_dict def SCREAMING_SNAKE_CASE_ ( __A : List[Any] ) -> Union[str, Any]: """simple docstring""" a_ : Optional[Any] = [ 'module.v.head.weight', 'module.v.head.bias', 'module.v.head_dist.weight', 'module.v.head_dist.bias', ] for k in ignore_keys: state_dict.pop(__A , __A ) @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( __A : Optional[Any] , __A : List[str] , __A : Union[str, Any]=False ) -> Optional[int]: """simple docstring""" a_ : Dict = get_audio_spectrogram_transformer_config(__A ) a_ : Any = { 'ast-finetuned-audioset-10-10-0.4593': ( 'https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1' ), 'ast-finetuned-audioset-10-10-0.450': ( 'https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1' ), 'ast-finetuned-audioset-10-10-0.448': ( 'https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1' ), 'ast-finetuned-audioset-10-10-0.448-v2': ( 'https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1' ), 'ast-finetuned-audioset-12-12-0.447': ( 'https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1' ), 'ast-finetuned-audioset-14-14-0.443': ( 'https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1' ), 'ast-finetuned-audioset-16-16-0.442': ( 'https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1' ), 'ast-finetuned-speech-commands-v2': ( 'https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1' ), } # load original state_dict a_ : Dict = model_name_to_url[model_name] a_ : Union[str, Any] = torch.hub.load_state_dict_from_url(__A , map_location='cpu' ) # remove some keys remove_keys(__A ) # rename some keys a_ : List[str] = convert_state_dict(__A , __A ) # load 🤗 model a_ : List[str] = ASTForAudioClassification(__A ) model.eval() model.load_state_dict(__A ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 a_ : Union[str, Any] = -4.2677393 if 'speech-commands' not in model_name else -6.845978 a_ : Union[str, Any] = 4.5689974 if 'speech-commands' not in model_name else 5.5654526 a_ : List[str] = 10_24 if 'speech-commands' not in model_name else 1_28 a_ : Dict = ASTFeatureExtractor(mean=__A , std=__A , max_length=__A ) if "speech-commands" in model_name: a_ : str = load_dataset('speech_commands' , 'v0.02' , split='validation' ) a_ : Dict = dataset[0]['audio']['array'] else: a_ : List[str] = hf_hub_download( repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' , ) a_ , a_ : Any = torchaudio.load(__A ) a_ : Dict = waveform.squeeze().numpy() a_ : Tuple = feature_extractor(__A , sampling_rate=1_60_00 , return_tensors='pt' ) # forward pass a_ : Union[str, Any] = model(**__A ) a_ : Optional[Any] = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": a_ : List[Any] = torch.tensor([-0.8760, -7.0042, -8.6602] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": a_ : Dict = torch.tensor([-1.1986, -7.0903, -8.2718] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": a_ : Union[str, Any] = torch.tensor([-2.6128, -8.0080, -9.4344] ) elif model_name == 
"ast-finetuned-audioset-10-10-0.448-v2": a_ : str = torch.tensor([-1.5080, -7.4534, -8.8917] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": a_ : Tuple = torch.tensor([-0.5050, -6.5833, -8.0843] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": a_ : int = torch.tensor([-0.3826, -7.0336, -8.2413] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": a_ : str = torch.tensor([-1.2113, -6.9101, -8.3470] ) elif model_name == "ast-finetuned-speech-commands-v2": a_ : List[str] = torch.tensor([6.1589, -8.0566, -8.7984] ) else: raise ValueError('Unknown model name' ) if not torch.allclose(logits[0, :3] , __A , atol=1e-4 ): raise ValueError('Logits don\'t match' ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: Path(__A ).mkdir(exist_ok=__A ) print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__A ) print(F"""Saving feature extractor to {pytorch_dump_folder_path}""" ) feature_extractor.save_pretrained(__A ) if push_to_hub: print('Pushing model and feature extractor to the hub...' ) model.push_to_hub(F"""MIT/{model_name}""" ) feature_extractor.push_to_hub(F"""MIT/{model_name}""" ) if __name__ == "__main__": UpperCAmelCase_ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='ast-finetuned-audioset-10-10-0.4593', type=str, help='Name of the Audio Spectrogram Transformer model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) UpperCAmelCase_ : str = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase_ : str = logging.get_logger(__name__) UpperCAmelCase_ : List[str] = { 'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json', 'YituTech/conv-bert-medium-small': ( 'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json' ), 'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json', # See all ConvBERT models at https://huggingface.co/models?filter=convbert } class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Optional[int] = '''convbert''' def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int]=3_0_5_2_2 , SCREAMING_SNAKE_CASE__ : Dict=7_6_8 , SCREAMING_SNAKE_CASE__ : Optional[int]=1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE__ : str=3_0_7_2 , SCREAMING_SNAKE_CASE__ : Dict="gelu" , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=5_1_2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=2 , SCREAMING_SNAKE_CASE__ : List[Any]=0.02 , SCREAMING_SNAKE_CASE__ : Any=1E-12 , SCREAMING_SNAKE_CASE__ : int=1 , SCREAMING_SNAKE_CASE__ : int=0 , SCREAMING_SNAKE_CASE__ : Optional[int]=2 , SCREAMING_SNAKE_CASE__ : Optional[int]=7_6_8 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : List[Any]=9 , SCREAMING_SNAKE_CASE__ : List[Any]=1 , SCREAMING_SNAKE_CASE__ : Dict=None , **SCREAMING_SNAKE_CASE__ : List[Any] , ) -> Any: super().__init__( pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) a_ : Tuple = vocab_size a_ : List[str] = hidden_size a_ : List[str] = num_hidden_layers a_ : Dict = num_attention_heads a_ : Optional[int] = intermediate_size a_ : int = hidden_act a_ : Dict = hidden_dropout_prob a_ : int = attention_probs_dropout_prob a_ : str = max_position_embeddings a_ : List[str] = type_vocab_size a_ : List[str] = initializer_range a_ : Tuple = layer_norm_eps a_ : Optional[int] = embedding_size a_ : List[Any] = head_ratio a_ : List[Any] = conv_kernel_size a_ : Tuple = num_groups a_ : Tuple = classifier_dropout class SCREAMING_SNAKE_CASE__ ( lowercase__ ): @property def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": a_ : Tuple = {0: 'batch', 1: 'choice', 2: 'sequence'} else: a_ : List[str] = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ] )
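# The configuration class defined above mirrors `ConvBertConfig` as published in `transformers`;
# a minimal, hedged sketch of building a randomly initialised ConvBERT model from it (the
# hyperparameter values below are illustrative assumptions, not taken from the source):
from transformers import ConvBertConfig, ConvBertModel

convbert_config = ConvBertConfig(hidden_size=768, num_hidden_layers=12, head_ratio=2, conv_kernel_size=9)
convbert_model = ConvBertModel(convbert_config)  # freshly initialised weights, no pretrained checkpoint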
import json import os from datetime import date from pathlib import Path from tabulate import DataRow, TableFormat, tabulate UpperCAmelCase_ : Union[str, Any] = TableFormat( lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None, headerrow=DataRow('', '|', '|'), datarow=DataRow('', '|', '|'), padding=1, with_header_hide=None, ) UpperCAmelCase_ : Optional[Any] = [] UpperCAmelCase_ : int = [] UpperCAmelCase_ : List[Any] = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}} UpperCAmelCase_ : str = [ { 'type': 'header', 'text': { 'type': 'plain_text', 'text': F'🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results', 'emoji': True, }, } ] UpperCAmelCase_ : Any = 0 for log in Path().glob('*.log'): UpperCAmelCase_ : Dict = 0 with open(log, 'r') as f: for line in f: UpperCAmelCase_ : int = json.loads(line) if line.get('nodeid', '') != "": UpperCAmelCase_ : List[Any] = line['nodeid'] if line.get('duration', None) is not None: UpperCAmelCase_ : Any = F'{line["duration"]:.4f}' if line.get('outcome', '') == "failed": section_num_failed += 1 failed.append([test, duration, log.name.split('_')[0]]) total_num_failed += 1 group_info.append([str(log), section_num_failed, failed]) UpperCAmelCase_ : Any = [] log.unlink() UpperCAmelCase_ : Optional[int] = '' UpperCAmelCase_ : Optional[Any] = [] if total_num_failed > 0: for name, num_failed, failed_tests in group_info: if num_failed > 0: if num_failed == 1: message += F"*{name[1:]}: {num_failed} failed test*\n" else: message += F"*{name[1:]}: {num_failed} failed tests*\n" UpperCAmelCase_ : str = [] UpperCAmelCase_ : str = {} for test in failed_tests: UpperCAmelCase_ : List[str] = test[0].split('::') UpperCAmelCase_ : Union[str, Any] = data[0].split('/')[-1] if data[0] not in filesafailed: UpperCAmelCase_ : int = [data[1:]] else: filesafailed[data[0]] += [data[1:]] failed_table.append(data) UpperCAmelCase_ : List[str] = [test[0] for test in failed_table] UpperCAmelCase_ : List[Any] = list(set(files)) # Count number of instances in failed_tests UpperCAmelCase_ : Optional[int] = [] for file in individual_files: table.append([file, len(filesafailed[file])]) UpperCAmelCase_ : List[Any] = tabulate( table, headers=['Test Location', 'Num Failed'], tablefmt=hf_table_format, stralign='right', ) message += F"\n```\n{failed_table}\n```" all_filesafailed.append(filesafailed) if len(message) > 3000: UpperCAmelCase_ : Optional[Any] = 'Too many failed tests, please see the full report in the Action results.' UpperCAmelCase_ : int = len(err) + 10 UpperCAmelCase_ : int = message[: 3000 - offset] + F'\n...\n```\n{err}' print(F'### {message}') else: UpperCAmelCase_ : Union[str, Any] = 'No failed tests! 🤗' print(F'## {message}') payload.append(no_error_payload) if os.environ.get('TEST_TYPE', '') != "": from slack_sdk import WebClient UpperCAmelCase_ : Dict = WebClient(token=os.environ['SLACK_API_TOKEN']) if message != "No failed tests! 
🤗": UpperCAmelCase_ : Any = { 'type': 'section', 'text': { 'type': 'mrkdwn', 'text': message, }, } payload.append(md_report) UpperCAmelCase_ : Optional[int] = { 'type': 'section', 'text': { 'type': 'mrkdwn', 'text': '*For more details:*', }, 'accessory': { 'type': 'button', 'text': { 'type': 'plain_text', 'text': 'Check Action results', 'emoji': True, }, 'url': F'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}', }, } payload.append(action_button) UpperCAmelCase_ : str = { 'type': 'context', 'elements': [ { 'type': 'plain_text', 'text': F'Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}', } ], } payload.append(date_report) UpperCAmelCase_ : str = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload) UpperCAmelCase_ : str = response.data['ts'] for failed_file in all_filesafailed: for test_location, test_failures in failed_file.items(): # Keep only the first instance of the test name UpperCAmelCase_ : List[Any] = '' for i, row in enumerate(test_failures): if row[0] != test_class: UpperCAmelCase_ : Tuple = row[0] else: UpperCAmelCase_ : Union[str, Any] = '' UpperCAmelCase_ : int = { 'type': 'section', 'text': { 'type': 'mrkdwn', 'text': F'Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```', }, } client.chat_postMessage( channel='#accelerate-ci-daily', thread_ts=ts, blocks=[payload], )
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE__ : def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str=1_3 , SCREAMING_SNAKE_CASE__ : Optional[int]=7 , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : str=9_9 , SCREAMING_SNAKE_CASE__ : str=2_4 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=6 , SCREAMING_SNAKE_CASE__ : Optional[int]=3_7 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=5_1_2 , SCREAMING_SNAKE_CASE__ : List[str]=1_6 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : int=0.02 , SCREAMING_SNAKE_CASE__ : Optional[Any]=3 , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Tuple=1_0_0_0 , ) -> str: a_ : Optional[Any] = parent a_ : List[str] = batch_size a_ : List[str] = seq_length a_ : str = is_training a_ : str = use_input_mask a_ : int = use_token_type_ids a_ : List[str] = use_labels a_ : Optional[int] = vocab_size a_ : Any = hidden_size a_ : int = num_hidden_layers a_ : List[str] = num_attention_heads a_ : str = intermediate_size a_ : Union[str, Any] = hidden_act a_ : List[str] = hidden_dropout_prob a_ : int = attention_probs_dropout_prob a_ : int = max_position_embeddings a_ : Tuple = type_vocab_size a_ : Optional[Any] = type_sequence_label_size a_ : Tuple = initializer_range a_ : Dict = num_labels a_ : str = scope a_ : Optional[int] = range_bbox def SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: a_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a_ : Any = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: a_ : int = bbox[i, j, 3] a_ : str = bbox[i, j, 1] a_ : List[str] = t if bbox[i, j, 2] < bbox[i, j, 0]: a_ : Tuple = bbox[i, j, 2] a_ : List[str] = bbox[i, j, 0] a_ : Union[str, Any] = t a_ : List[Any] = None if self.use_input_mask: a_ : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) a_ : List[Any] = None if self.use_token_type_ids: a_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a_ : int = None a_ : Tuple = None if self.use_labels: a_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a_ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a_ : Optional[int] = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def SCREAMING_SNAKE_CASE ( self : Dict ) -> int: return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> str: a_ : Any = LiltModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : Any = model(SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = model(SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = model(SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> int: a_ : Any = self.num_labels a_ : str = LiltForTokenClassification(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : str = model( SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> str: a_ : Union[str, Any] = LiltForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : List[str] = model( SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , start_positions=SCREAMING_SNAKE_CASE__ , end_positions=SCREAMING_SNAKE_CASE__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: a_ : int = self.prepare_config_and_inputs() ( ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ) : List[Any] = config_and_inputs a_ : Optional[int] = { 'input_ids': input_ids, 'bbox': bbox, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ): snake_case__ : Union[str, Any] = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) snake_case__ : str = ( 
{ '''feature-extraction''': LiltModel, '''question-answering''': LiltForQuestionAnswering, '''text-classification''': LiltForSequenceClassification, '''token-classification''': LiltForTokenClassification, '''zero-shot''': LiltForSequenceClassification, } if is_torch_available() else {} ) snake_case__ : List[str] = False snake_case__ : str = False def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int ) -> int: return True def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: a_ : str = LiltModelTester(self ) a_ : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=3_7 ) def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]: self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: a_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: a_ : Tuple = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: a_ : List[str] = type self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: a_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]: a_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE__ ) @slow def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a_ : List[Any] = LiltModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) @require_torch @slow class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]: a_ : List[str] = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(SCREAMING_SNAKE_CASE__ ) a_ : str = torch.tensor([[1, 2]] , device=SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=SCREAMING_SNAKE_CASE__ ) # forward pass with torch.no_grad(): a_ : str = model(input_ids=SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = torch.Size([1, 2, 7_6_8] ) a_ : int = torch.tensor( [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=SCREAMING_SNAKE_CASE__ , ) self.assertTrue(outputs.last_hidden_state.shape , SCREAMING_SNAKE_CASE__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) )
import math import sys def SCREAMING_SNAKE_CASE_ ( __A : str ) -> str: """simple docstring""" a_ : Any = '' try: with open(__A , 'rb' ) as binary_file: a_ : int = binary_file.read() for dat in data: a_ : Dict = F"""{dat:08b}""" result += curr_byte return result except OSError: print('File not accessible' ) sys.exit() def SCREAMING_SNAKE_CASE_ ( __A : str ) -> str: """simple docstring""" a_ : List[Any] = {'0': '0', '1': '1'} a_ , a_ : Dict = '', '' a_ : Optional[Any] = len(__A ) for i in range(len(__A ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue a_ : str = lexicon[curr_string] result += last_match_id a_ : List[Any] = last_match_id + '0' if math.loga(__A ).is_integer(): a_ : List[Any] = {} for curr_key in list(__A ): a_ : Tuple = lexicon.pop(__A ) a_ : List[Any] = new_lex a_ : str = last_match_id + '1' index += 1 a_ : int = '' return result def SCREAMING_SNAKE_CASE_ ( __A : str , __A : str ) -> None: """simple docstring""" a_ : Any = 8 try: with open(__A , 'wb' ) as opened_file: a_ : Optional[Any] = [ to_write[i : i + byte_length] for i in range(0 , len(__A ) , __A ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append('10000000' ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array[:-1]: opened_file.write(int(__A , 2 ).to_bytes(1 , byteorder='big' ) ) except OSError: print('File not accessible' ) sys.exit() def SCREAMING_SNAKE_CASE_ ( __A : str ) -> str: """simple docstring""" a_ : Tuple = 0 for letter in data_bits: if letter == "1": break counter += 1 a_ : Any = data_bits[counter:] a_ : List[Any] = data_bits[counter + 1 :] return data_bits def SCREAMING_SNAKE_CASE_ ( __A : str , __A : str ) -> None: """simple docstring""" a_ : Dict = read_file_binary(__A ) a_ : Any = remove_prefix(__A ) a_ : Dict = decompress_data(__A ) write_file_binary(__A , __A ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
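# Hypothetical command-line usage of the module above (the file names are assumptions). Despite the
# `compress(...)` entry point at the bottom, the pipeline reads an already-compressed binary file,
# strips its length prefix and performs the Lempel-Ziv-style decompression implemented here:
#
#   python lempel_ziv_decompress.py compressed.bin decompressed.bin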
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class SCREAMING_SNAKE_CASE__ : def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple=1_3 , SCREAMING_SNAKE_CASE__ : str=7 , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=9_9 , SCREAMING_SNAKE_CASE__ : Optional[Any]=3_2 , SCREAMING_SNAKE_CASE__ : List[str]=5 , SCREAMING_SNAKE_CASE__ : List[Any]=4 , SCREAMING_SNAKE_CASE__ : Tuple=3_7 , SCREAMING_SNAKE_CASE__ : Any="gelu" , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : int=5_1_2 , SCREAMING_SNAKE_CASE__ : int=1_6 , SCREAMING_SNAKE_CASE__ : Optional[int]=2 , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE__ : Optional[Any]=4 , SCREAMING_SNAKE_CASE__ : Optional[int]=None , ) -> Any: a_ : Tuple = parent a_ : int = batch_size a_ : Tuple = seq_length a_ : List[Any] = is_training a_ : List[str] = use_token_type_ids a_ : Dict = use_labels a_ : Any = vocab_size a_ : List[str] = hidden_size a_ : Tuple = num_hidden_layers a_ : List[Any] = num_attention_heads a_ : Dict = intermediate_size a_ : Any = hidden_act a_ : List[str] = hidden_dropout_prob a_ : Tuple = attention_probs_dropout_prob a_ : Optional[Any] = max_position_embeddings a_ : List[Any] = type_vocab_size a_ : int = type_sequence_label_size a_ : List[Any] = initializer_range a_ : List[str] = num_labels a_ : Union[str, Any] = num_choices a_ : str = scope a_ : Tuple = self.vocab_size - 1 def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: a_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a_ : Any = None if self.use_token_type_ids: a_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a_ : List[Any] = None a_ : Union[str, Any] = None a_ : List[Any] = None if self.use_labels: a_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a_ : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) a_ : Union[str, Any] = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) a_ : List[str] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , *SCREAMING_SNAKE_CASE__ : Tuple ) -> Union[str, Any]: a_ : Dict = OpenAIGPTModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : str = 
model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , head_mask=SCREAMING_SNAKE_CASE__ ) a_ : Dict = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ ) a_ : Dict = model(SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Any: a_ : str = OpenAIGPTLMHeadModel(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : Optional[int] = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict: a_ : int = OpenAIGPTDoubleHeadsModel(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : str = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : str ) -> List[str]: a_ : Any = self.num_labels a_ : Dict = OpenAIGPTForSequenceClassification(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a_ : Any = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: a_ : Optional[Any] = self.prepare_config_and_inputs() ( ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ) : Optional[Any] = config_and_inputs a_ : Optional[int] = { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask, } return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ): snake_case__ : Tuple = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) snake_case__ : List[str] = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly snake_case__ : Dict = ( { '''feature-extraction''': OpenAIGPTModel, '''text-classification''': OpenAIGPTForSequenceClassification, '''text-generation''': OpenAIGPTLMHeadModel, '''zero-shot''': OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] ) -> Dict: if 
pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any=False ) -> List[str]: a_ : str = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": a_ : Optional[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ , ) a_ : str = inputs_dict['labels'] a_ : Optional[int] = inputs_dict['labels'] a_ : Optional[int] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ , ) a_ : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ ) return inputs_dict def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: a_ : str = OpenAIGPTModelTester(self ) a_ : int = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , n_embd=3_7 ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: a_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: a_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]: a_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: a_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*SCREAMING_SNAKE_CASE__ ) @slow def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str: for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a_ : str = OpenAIGPTModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE ( self : Dict ) -> int: a_ : Dict = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' ) model.to(SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ ) # the president is a_ : Tuple = [ 4_8_1, 4_7_3_5, 5_4_4, 2_4_6, 9_6_3, 8_7_0, 7_6_2, 2_3_9, 2_4_4, 4_0_4_7_7, 2_4_4, 2_4_9, 7_1_9, 8_8_1, 4_8_7, 5_4_4, 2_4_0, 2_4_4, 6_0_3, 4_8_1, ] # the president is a very good man. " \n " i\'m sure he is, " said the a_ : Dict = model.generate(SCREAMING_SNAKE_CASE__ , do_sample=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(output_ids[0].tolist() , SCREAMING_SNAKE_CASE__ )
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase_ : str = logging.get_logger(__name__) UpperCAmelCase_ : Any = { 'microsoft/beit-base-patch16-224-pt22k': ( 'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json' ), # See all BEiT models at https://huggingface.co/models?filter=beit } class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Tuple = '''beit''' def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Any=8_1_9_2 , SCREAMING_SNAKE_CASE__ : Tuple=7_6_8 , SCREAMING_SNAKE_CASE__ : Any=1_2 , SCREAMING_SNAKE_CASE__ : str=1_2 , SCREAMING_SNAKE_CASE__ : Any=3_0_7_2 , SCREAMING_SNAKE_CASE__ : Optional[int]="gelu" , SCREAMING_SNAKE_CASE__ : str=0.0 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.0 , SCREAMING_SNAKE_CASE__ : List[str]=0.02 , SCREAMING_SNAKE_CASE__ : Optional[int]=1E-12 , SCREAMING_SNAKE_CASE__ : Any=2_2_4 , SCREAMING_SNAKE_CASE__ : List[Any]=1_6 , SCREAMING_SNAKE_CASE__ : Dict=3 , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : str=False , SCREAMING_SNAKE_CASE__ : str=False , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=[3, 5, 7, 1_1] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=[1, 2, 3, 6] , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Any=0.4 , SCREAMING_SNAKE_CASE__ : Any=2_5_6 , SCREAMING_SNAKE_CASE__ : Any=1 , SCREAMING_SNAKE_CASE__ : Optional[int]=False , SCREAMING_SNAKE_CASE__ : List[Any]=2_5_5 , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> str: super().__init__(**SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = vocab_size a_ : str = hidden_size a_ : int = num_hidden_layers a_ : Any = num_attention_heads a_ : Any = intermediate_size a_ : Optional[Any] = hidden_act a_ : str = hidden_dropout_prob a_ : List[Any] = attention_probs_dropout_prob a_ : Optional[Any] = initializer_range a_ : Any = layer_norm_eps a_ : Dict = image_size a_ : List[str] = patch_size a_ : Optional[int] = num_channels a_ : str = use_mask_token a_ : Optional[int] = use_absolute_position_embeddings a_ : Union[str, Any] = use_relative_position_bias a_ : Optional[Any] = use_shared_relative_position_bias a_ : Optional[int] = layer_scale_init_value a_ : str = drop_path_rate a_ : Tuple = use_mean_pooling # decode head attributes (semantic segmentation) a_ : str = out_indices a_ : List[str] = pool_scales # auxiliary head attributes (semantic segmentation) a_ : Optional[int] = use_auxiliary_head a_ : Optional[Any] = auxiliary_loss_weight a_ : List[str] = auxiliary_channels a_ : List[str] = auxiliary_num_convs a_ : Dict = auxiliary_concat_input a_ : str = semantic_loss_ignore_index class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Dict = version.parse('''1.11''' ) @property def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> float: return 1E-4
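# The class above corresponds to `BeitConfig` in `transformers`; a small, hedged sketch of using the
# segmentation-related knobs it exposes (image size, auxiliary head, feature indices) to build a
# randomly initialised semantic-segmentation model. The concrete values are illustrative assumptions:
from transformers import BeitConfig, BeitForSemanticSegmentation

beit_config = BeitConfig(image_size=640, out_indices=[3, 5, 7, 11], use_auxiliary_head=True)
beit_segmenter = BeitForSemanticSegmentation(beit_config)  # no pretrained weights loaded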
import copy from typing import Dict, List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING UpperCAmelCase_ : Optional[int] = { 'facebook/mask2former-swin-small-coco-instance': ( 'https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json' ) # See all Mask2Former models at https://huggingface.co/models?filter=mask2former } UpperCAmelCase_ : List[str] = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Any = '''mask2former''' snake_case__ : Any = ['''swin'''] snake_case__ : str = {'''hidden_size''': '''hidden_dim'''} def __init__( self : Any , SCREAMING_SNAKE_CASE__ : Optional[Dict] = None , SCREAMING_SNAKE_CASE__ : int = 2_5_6 , SCREAMING_SNAKE_CASE__ : int = 2_5_6 , SCREAMING_SNAKE_CASE__ : int = 2_5_6 , SCREAMING_SNAKE_CASE__ : int = 1_0_2_4 , SCREAMING_SNAKE_CASE__ : str = "relu" , SCREAMING_SNAKE_CASE__ : int = 6 , SCREAMING_SNAKE_CASE__ : int = 1_0 , SCREAMING_SNAKE_CASE__ : int = 8 , SCREAMING_SNAKE_CASE__ : float = 0.0 , SCREAMING_SNAKE_CASE__ : int = 2_0_4_8 , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : int = 4 , SCREAMING_SNAKE_CASE__ : int = 2_5_5 , SCREAMING_SNAKE_CASE__ : int = 1_0_0 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 2.0 , SCREAMING_SNAKE_CASE__ : float = 5.0 , SCREAMING_SNAKE_CASE__ : float = 5.0 , SCREAMING_SNAKE_CASE__ : int = 1_2_5_4_4 , SCREAMING_SNAKE_CASE__ : float = 3.0 , SCREAMING_SNAKE_CASE__ : float = 0.75 , SCREAMING_SNAKE_CASE__ : float = 0.02 , SCREAMING_SNAKE_CASE__ : float = 1.0 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : List[int] = [4, 8, 1_6, 3_2] , SCREAMING_SNAKE_CASE__ : bool = None , **SCREAMING_SNAKE_CASE__ : int , ) -> List[Any]: if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' ) a_ : Dict = CONFIG_MAPPING['swin']( image_size=2_2_4 , in_channels=3 , patch_size=4 , embed_dim=9_6 , depths=[2, 2, 1_8, 2] , num_heads=[3, 6, 1_2, 2_4] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=SCREAMING_SNAKE_CASE__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): a_ : Any = backbone_config.pop('model_type' ) a_ : Optional[Any] = CONFIG_MAPPING[backbone_model_type] a_ : List[str] = config_class.from_dict(SCREAMING_SNAKE_CASE__ ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( F"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. 
""" F"""Supported model types: {",".join(self.backbones_supported )}""" ) a_ : Dict = backbone_config a_ : List[str] = feature_size a_ : List[str] = mask_feature_size a_ : int = hidden_dim a_ : Dict = encoder_feedforward_dim a_ : str = activation_function a_ : List[str] = encoder_layers a_ : List[str] = decoder_layers a_ : Dict = num_attention_heads a_ : str = dropout a_ : Tuple = dim_feedforward a_ : List[str] = pre_norm a_ : Optional[int] = enforce_input_projection a_ : Any = common_stride a_ : Optional[int] = ignore_value a_ : int = num_queries a_ : Tuple = no_object_weight a_ : Dict = class_weight a_ : Optional[int] = mask_weight a_ : Optional[int] = dice_weight a_ : str = train_num_points a_ : List[str] = oversample_ratio a_ : List[Any] = importance_sample_ratio a_ : Any = init_std a_ : Union[str, Any] = init_xavier_std a_ : Union[str, Any] = use_auxiliary_loss a_ : Dict = feature_strides a_ : List[str] = output_auxiliary_logits a_ : Dict = decoder_layers super().__init__(**SCREAMING_SNAKE_CASE__ ) @classmethod def SCREAMING_SNAKE_CASE ( cls : str , SCREAMING_SNAKE_CASE__ : PretrainedConfig , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> List[Any]: return cls( backbone_config=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict[str, any]: a_ : Optional[int] = copy.deepcopy(self.__dict__ ) a_ : List[Any] = self.backbone_config.to_dict() a_ : Optional[Any] = self.__class__.model_type return output
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ : List[Any] = logging.get_logger(__name__) UpperCAmelCase_ : int = { 'MIT/ast-finetuned-audioset-10-10-0.4593': ( 'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json' ), } class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Union[str, Any] = '''audio-spectrogram-transformer''' def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Tuple=7_6_8 , SCREAMING_SNAKE_CASE__ : Optional[int]=1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE__ : int=3_0_7_2 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : List[Any]=0.0 , SCREAMING_SNAKE_CASE__ : List[str]=0.0 , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : Dict=1E-12 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1_6 , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Dict=1_0 , SCREAMING_SNAKE_CASE__ : int=1_0 , SCREAMING_SNAKE_CASE__ : Any=1_0_2_4 , SCREAMING_SNAKE_CASE__ : Optional[int]=1_2_8 , **SCREAMING_SNAKE_CASE__ : str , ) -> Optional[int]: super().__init__(**SCREAMING_SNAKE_CASE__ ) a_ : Dict = hidden_size a_ : Any = num_hidden_layers a_ : Union[str, Any] = num_attention_heads a_ : Optional[Any] = intermediate_size a_ : Optional[Any] = hidden_act a_ : Dict = hidden_dropout_prob a_ : int = attention_probs_dropout_prob a_ : List[str] = initializer_range a_ : List[Any] = layer_norm_eps a_ : int = patch_size a_ : int = qkv_bias a_ : int = frequency_stride a_ : Union[str, Any] = time_stride a_ : str = max_length a_ : List[Any] = num_mel_bins
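# This configuration corresponds to `ASTConfig` in `transformers`. A hedged sketch of instantiating
# it with Speech Commands-style settings (128 input frames and 35 labels, loosely following the
# speech-commands branch of the conversion script earlier in this dump; the values are illustrative):
from transformers import ASTConfig, ASTForAudioClassification

speech_commands_config = ASTConfig(max_length=128, num_labels=35)
speech_commands_model = ASTForAudioClassification(speech_commands_config)  # randomly initialised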
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ : Dict = logging.get_logger(__name__) UpperCAmelCase_ : Union[str, Any] = { 'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json', } class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : List[str] = '''switch_transformers''' snake_case__ : Optional[int] = ['''past_key_values'''] snake_case__ : Optional[Any] = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[int]=3_2_1_2_8 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=7_6_8 , SCREAMING_SNAKE_CASE__ : Optional[Any]=6_4 , SCREAMING_SNAKE_CASE__ : List[str]=2_0_4_8 , SCREAMING_SNAKE_CASE__ : Dict=6_4 , SCREAMING_SNAKE_CASE__ : List[Any]=1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE__ : str=3 , SCREAMING_SNAKE_CASE__ : Tuple=1_2 , SCREAMING_SNAKE_CASE__ : Tuple=8 , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.01 , SCREAMING_SNAKE_CASE__ : str="float32" , SCREAMING_SNAKE_CASE__ : str=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3_2 , SCREAMING_SNAKE_CASE__ : Dict=1_2_8 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Dict=1E-6 , SCREAMING_SNAKE_CASE__ : Dict=0.001 , SCREAMING_SNAKE_CASE__ : Any=0.001 , SCREAMING_SNAKE_CASE__ : Optional[int]=1.0 , SCREAMING_SNAKE_CASE__ : Any="relu" , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : Optional[int]=1 , **SCREAMING_SNAKE_CASE__ : Dict , ) -> Optional[Any]: a_ : Optional[int] = vocab_size a_ : List[str] = d_model a_ : Tuple = d_kv a_ : Optional[Any] = d_ff a_ : List[Any] = num_sparse_encoder_layers a_ : Any = num_layers a_ : str = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry a_ : List[Any] = num_sparse_decoder_layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_encoder_layers > 0: a_ : Optional[int] = self.num_layers // self.num_sparse_encoder_layers else: a_ : List[Any] = self.num_layers # HACK: this will create 0 sparse layers # This tells us, each how many encoder layer we'll have to set a sparse layer. 
if self.num_sparse_decoder_layers > 0: a_ : Union[str, Any] = self.num_decoder_layers // self.num_sparse_decoder_layers else: a_ : List[str] = self.num_decoder_layers # HACK: this will create 0 sparse layers a_ : Dict = num_heads a_ : str = num_experts a_ : Any = expert_capacity a_ : List[Any] = router_bias a_ : str = router_jitter_noise if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(F"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" ) a_ : Optional[int] = router_dtype a_ : int = router_ignore_padding_tokens a_ : Any = relative_attention_num_buckets a_ : List[str] = relative_attention_max_distance a_ : Optional[Any] = dropout_rate a_ : Tuple = layer_norm_epsilon a_ : Dict = initializer_factor a_ : Any = feed_forward_proj a_ : Tuple = use_cache a_ : str = add_router_probs a_ : Optional[int] = router_z_loss_coef a_ : List[str] = router_aux_loss_coef a_ : int = self.feed_forward_proj.split('-' ) a_ : int = act_info[-1] a_ : Optional[int] = act_info[0] == 'gated' if len(SCREAMING_SNAKE_CASE__ ) > 1 and act_info[0] != "gated" or len(SCREAMING_SNAKE_CASE__ ) > 2: raise ValueError( F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ' '\'gated-gelu\' or \'relu\'' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": a_ : Any = 'gelu_new' super().__init__( pad_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
from __future__ import annotations from collections import deque from collections.abc import Sequence from dataclasses import dataclass from typing import Any @dataclass class SCREAMING_SNAKE_CASE__ : snake_case__ : int snake_case__ : Node | None = None snake_case__ : Node | None = None def SCREAMING_SNAKE_CASE_ ( ) -> Node | None: """simple docstring""" a_ : Dict = Node(1 ) a_ : Any = Node(2 ) a_ : List[Any] = Node(3 ) a_ : Tuple = Node(4 ) a_ : int = Node(5 ) return tree def SCREAMING_SNAKE_CASE_ ( __A : Node | None ) -> list[int]: """simple docstring""" return [root.data, *preorder(root.left ), *preorder(root.right )] if root else [] def SCREAMING_SNAKE_CASE_ ( __A : Node | None ) -> list[int]: """simple docstring""" return postorder(root.left ) + postorder(root.right ) + [root.data] if root else [] def SCREAMING_SNAKE_CASE_ ( __A : Node | None ) -> list[int]: """simple docstring""" return [*inorder(root.left ), root.data, *inorder(root.right )] if root else [] def SCREAMING_SNAKE_CASE_ ( __A : Node | None ) -> int: """simple docstring""" return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0 def SCREAMING_SNAKE_CASE_ ( __A : Node | None ) -> Sequence[Node | None]: """simple docstring""" a_ : list[Any] = [] if root is None: return output a_ : Optional[int] = deque([root] ) while process_queue: a_ : str = process_queue.popleft() output.append(node.data ) if node.left: process_queue.append(node.left ) if node.right: process_queue.append(node.right ) return output def SCREAMING_SNAKE_CASE_ ( __A : Node | None , __A : int ) -> Sequence[Node | None]: """simple docstring""" a_ : list[Any] = [] def populate_output(__A : Node | None , __A : int ) -> None: if not root: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.left , level - 1 ) populate_output(root.right , level - 1 ) populate_output(__A , __A ) return output def SCREAMING_SNAKE_CASE_ ( __A : Node | None , __A : int ) -> Sequence[Node | None]: """simple docstring""" a_ : list[Any] = [] def populate_output(__A : Node | None , __A : int ) -> None: if root is None: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.right , level - 1 ) populate_output(root.left , level - 1 ) populate_output(__A , __A ) return output def SCREAMING_SNAKE_CASE_ ( __A : Node | None ) -> Sequence[Node | None] | list[Any]: """simple docstring""" if root is None: return [] a_ : list[Sequence[Node | None]] = [] a_ : List[str] = 0 a_ : Any = height(__A ) for h in range(1 , height_tree + 1 ): if not flag: output.append(get_nodes_from_left_to_right(__A , __A ) ) a_ : List[Any] = 1 else: output.append(get_nodes_from_right_to_left(__A , __A ) ) a_ : Any = 0 return output def SCREAMING_SNAKE_CASE_ ( ) -> None: # Main function for testing. """simple docstring""" a_ : str = make_tree() print(F"""In-order Traversal: {inorder(__A )}""" ) print(F"""Pre-order Traversal: {preorder(__A )}""" ) print(F"""Post-order Traversal: {postorder(__A )}""" , '\n' ) print(F"""Height of Tree: {height(__A )}""" , '\n' ) print('Complete Level Order Traversal: ' ) print(level_order(__A ) , '\n' ) print('Level-wise order Traversal: ' ) for level in range(1 , height(__A ) + 1 ): print(F"""Level {level}:""" , get_nodes_from_left_to_right(__A , level=__A ) ) print('\nZigZag order Traversal: ' ) print(zigzag(__A ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
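# For reference, assuming the sample-tree helper above wires the nodes in the conventional way
# (1 at the root, 2 and 3 as its children, 4 and 5 below 2), the traversals evaluate to:
#   in-order    : [4, 2, 5, 1, 3]
#   pre-order   : [1, 2, 4, 5, 3]
#   post-order  : [4, 5, 2, 3, 1]
#   level order : [1, 2, 3, 4, 5]
#   zig-zag     : [[1], [3, 2], [4, 5]]
#   height      : 3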
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool UpperCAmelCase_ : Tuple = { 'Acehnese Arabic': 'ace_Arab', 'Acehnese Latin': 'ace_Latn', 'Mesopotamian Arabic': 'acm_Arab', 'Ta\'izzi-Adeni Arabic': 'acq_Arab', 'Tunisian Arabic': 'aeb_Arab', 'Afrikaans': 'afr_Latn', 'South Levantine Arabic': 'ajp_Arab', 'Akan': 'aka_Latn', 'Amharic': 'amh_Ethi', 'North Levantine Arabic': 'apc_Arab', 'Modern Standard Arabic': 'arb_Arab', 'Modern Standard Arabic Romanized': 'arb_Latn', 'Najdi Arabic': 'ars_Arab', 'Moroccan Arabic': 'ary_Arab', 'Egyptian Arabic': 'arz_Arab', 'Assamese': 'asm_Beng', 'Asturian': 'ast_Latn', 'Awadhi': 'awa_Deva', 'Central Aymara': 'ayr_Latn', 'South Azerbaijani': 'azb_Arab', 'North Azerbaijani': 'azj_Latn', 'Bashkir': 'bak_Cyrl', 'Bambara': 'bam_Latn', 'Balinese': 'ban_Latn', 'Belarusian': 'bel_Cyrl', 'Bemba': 'bem_Latn', 'Bengali': 'ben_Beng', 'Bhojpuri': 'bho_Deva', 'Banjar Arabic': 'bjn_Arab', 'Banjar Latin': 'bjn_Latn', 'Standard Tibetan': 'bod_Tibt', 'Bosnian': 'bos_Latn', 'Buginese': 'bug_Latn', 'Bulgarian': 'bul_Cyrl', 'Catalan': 'cat_Latn', 'Cebuano': 'ceb_Latn', 'Czech': 'ces_Latn', 'Chokwe': 'cjk_Latn', 'Central Kurdish': 'ckb_Arab', 'Crimean Tatar': 'crh_Latn', 'Welsh': 'cym_Latn', 'Danish': 'dan_Latn', 'German': 'deu_Latn', 'Southwestern Dinka': 'dik_Latn', 'Dyula': 'dyu_Latn', 'Dzongkha': 'dzo_Tibt', 'Greek': 'ell_Grek', 'English': 'eng_Latn', 'Esperanto': 'epo_Latn', 'Estonian': 'est_Latn', 'Basque': 'eus_Latn', 'Ewe': 'ewe_Latn', 'Faroese': 'fao_Latn', 'Fijian': 'fij_Latn', 'Finnish': 'fin_Latn', 'Fon': 'fon_Latn', 'French': 'fra_Latn', 'Friulian': 'fur_Latn', 'Nigerian Fulfulde': 'fuv_Latn', 'Scottish Gaelic': 'gla_Latn', 'Irish': 'gle_Latn', 'Galician': 'glg_Latn', 'Guarani': 'grn_Latn', 'Gujarati': 'guj_Gujr', 'Haitian Creole': 'hat_Latn', 'Hausa': 'hau_Latn', 'Hebrew': 'heb_Hebr', 'Hindi': 'hin_Deva', 'Chhattisgarhi': 'hne_Deva', 'Croatian': 'hrv_Latn', 'Hungarian': 'hun_Latn', 'Armenian': 'hye_Armn', 'Igbo': 'ibo_Latn', 'Ilocano': 'ilo_Latn', 'Indonesian': 'ind_Latn', 'Icelandic': 'isl_Latn', 'Italian': 'ita_Latn', 'Javanese': 'jav_Latn', 'Japanese': 'jpn_Jpan', 'Kabyle': 'kab_Latn', 'Jingpho': 'kac_Latn', 'Kamba': 'kam_Latn', 'Kannada': 'kan_Knda', 'Kashmiri Arabic': 'kas_Arab', 'Kashmiri Devanagari': 'kas_Deva', 'Georgian': 'kat_Geor', 'Central Kanuri Arabic': 'knc_Arab', 'Central Kanuri Latin': 'knc_Latn', 'Kazakh': 'kaz_Cyrl', 'Kabiyè': 'kbp_Latn', 'Kabuverdianu': 'kea_Latn', 'Khmer': 'khm_Khmr', 'Kikuyu': 'kik_Latn', 'Kinyarwanda': 'kin_Latn', 'Kyrgyz': 'kir_Cyrl', 'Kimbundu': 'kmb_Latn', 'Northern Kurdish': 'kmr_Latn', 'Kikongo': 'kon_Latn', 'Korean': 'kor_Hang', 'Lao': 'lao_Laoo', 'Ligurian': 'lij_Latn', 'Limburgish': 'lim_Latn', 'Lingala': 'lin_Latn', 'Lithuanian': 'lit_Latn', 'Lombard': 'lmo_Latn', 'Latgalian': 'ltg_Latn', 'Luxembourgish': 'ltz_Latn', 'Luba-Kasai': 'lua_Latn', 'Ganda': 'lug_Latn', 'Luo': 
'luo_Latn', 'Mizo': 'lus_Latn', 'Standard Latvian': 'lvs_Latn', 'Magahi': 'mag_Deva', 'Maithili': 'mai_Deva', 'Malayalam': 'mal_Mlym', 'Marathi': 'mar_Deva', 'Minangkabau Arabic ': 'min_Arab', 'Minangkabau Latin': 'min_Latn', 'Macedonian': 'mkd_Cyrl', 'Plateau Malagasy': 'plt_Latn', 'Maltese': 'mlt_Latn', 'Meitei Bengali': 'mni_Beng', 'Halh Mongolian': 'khk_Cyrl', 'Mossi': 'mos_Latn', 'Maori': 'mri_Latn', 'Burmese': 'mya_Mymr', 'Dutch': 'nld_Latn', 'Norwegian Nynorsk': 'nno_Latn', 'Norwegian Bokmål': 'nob_Latn', 'Nepali': 'npi_Deva', 'Northern Sotho': 'nso_Latn', 'Nuer': 'nus_Latn', 'Nyanja': 'nya_Latn', 'Occitan': 'oci_Latn', 'West Central Oromo': 'gaz_Latn', 'Odia': 'ory_Orya', 'Pangasinan': 'pag_Latn', 'Eastern Panjabi': 'pan_Guru', 'Papiamento': 'pap_Latn', 'Western Persian': 'pes_Arab', 'Polish': 'pol_Latn', 'Portuguese': 'por_Latn', 'Dari': 'prs_Arab', 'Southern Pashto': 'pbt_Arab', 'Ayacucho Quechua': 'quy_Latn', 'Romanian': 'ron_Latn', 'Rundi': 'run_Latn', 'Russian': 'rus_Cyrl', 'Sango': 'sag_Latn', 'Sanskrit': 'san_Deva', 'Santali': 'sat_Olck', 'Sicilian': 'scn_Latn', 'Shan': 'shn_Mymr', 'Sinhala': 'sin_Sinh', 'Slovak': 'slk_Latn', 'Slovenian': 'slv_Latn', 'Samoan': 'smo_Latn', 'Shona': 'sna_Latn', 'Sindhi': 'snd_Arab', 'Somali': 'som_Latn', 'Southern Sotho': 'sot_Latn', 'Spanish': 'spa_Latn', 'Tosk Albanian': 'als_Latn', 'Sardinian': 'srd_Latn', 'Serbian': 'srp_Cyrl', 'Swati': 'ssw_Latn', 'Sundanese': 'sun_Latn', 'Swedish': 'swe_Latn', 'Swahili': 'swh_Latn', 'Silesian': 'szl_Latn', 'Tamil': 'tam_Taml', 'Tatar': 'tat_Cyrl', 'Telugu': 'tel_Telu', 'Tajik': 'tgk_Cyrl', 'Tagalog': 'tgl_Latn', 'Thai': 'tha_Thai', 'Tigrinya': 'tir_Ethi', 'Tamasheq Latin': 'taq_Latn', 'Tamasheq Tifinagh': 'taq_Tfng', 'Tok Pisin': 'tpi_Latn', 'Tswana': 'tsn_Latn', 'Tsonga': 'tso_Latn', 'Turkmen': 'tuk_Latn', 'Tumbuka': 'tum_Latn', 'Turkish': 'tur_Latn', 'Twi': 'twi_Latn', 'Central Atlas Tamazight': 'tzm_Tfng', 'Uyghur': 'uig_Arab', 'Ukrainian': 'ukr_Cyrl', 'Umbundu': 'umb_Latn', 'Urdu': 'urd_Arab', 'Northern Uzbek': 'uzn_Latn', 'Venetian': 'vec_Latn', 'Vietnamese': 'vie_Latn', 'Waray': 'war_Latn', 'Wolof': 'wol_Latn', 'Xhosa': 'xho_Latn', 'Eastern Yiddish': 'ydd_Hebr', 'Yoruba': 'yor_Latn', 'Yue Chinese': 'yue_Hant', 'Chinese Simplified': 'zho_Hans', 'Chinese Traditional': 'zho_Hant', 'Standard Malay': 'zsm_Latn', 'Zulu': 'zul_Latn', } class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : str = '''facebook/nllb-200-distilled-600M''' snake_case__ : Union[str, Any] = ( '''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should ''' '''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, ''' '''which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in ''' '''plain English, such as \'Romanian\', or \'Albanian\'. 
It returns the text translated in `tgt_lang`.''' ) snake_case__ : Optional[Any] = '''translator''' snake_case__ : Tuple = AutoTokenizer snake_case__ : Union[str, Any] = AutoModelForSeqaSeqLM snake_case__ : Dict = LANGUAGE_CODES snake_case__ : str = ['''text''', '''text''', '''text'''] snake_case__ : Tuple = ['''text'''] def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Tuple: if src_lang not in self.lang_to_code: raise ValueError(F"""{src_lang} is not a supported language.""" ) if tgt_lang not in self.lang_to_code: raise ValueError(F"""{tgt_lang} is not a supported language.""" ) a_ : str = self.lang_to_code[src_lang] a_ : Any = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( SCREAMING_SNAKE_CASE__ , return_tensors='pt' , src_lang=SCREAMING_SNAKE_CASE__ , tgt_lang=SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : Tuple ) -> Any: return self.model.generate(**SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[str] ) -> Dict: return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
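# A hypothetical way to exercise this tool through the transformers agents API; the tool identifier
# and the example sentence are assumptions, not taken from the source:
from transformers import load_tool

translator = load_tool("translation")
print(translator("The quick brown fox jumps over the lazy dog.", src_lang="English", tgt_lang="German"))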
import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging UpperCAmelCase_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name class SCREAMING_SNAKE_CASE__ ( lowercase__ ): def __init__( self : Any , SCREAMING_SNAKE_CASE__ : AutoencoderKL , SCREAMING_SNAKE_CASE__ : CLIPTextModel , SCREAMING_SNAKE_CASE__ : CLIPTokenizer , SCREAMING_SNAKE_CASE__ : UNetaDConditionModel , SCREAMING_SNAKE_CASE__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , SCREAMING_SNAKE_CASE__ : StableDiffusionSafetyChecker , SCREAMING_SNAKE_CASE__ : CLIPImageProcessor , ) -> List[str]: super().__init__() self.register_modules( vae=SCREAMING_SNAKE_CASE__ , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ , unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , safety_checker=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ , ) def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : Optional[Union[str, int]] = "auto" ) -> Optional[int]: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory a_ : List[Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str: self.enable_attention_slicing(SCREAMING_SNAKE_CASE__ ) @torch.no_grad() def __call__( self : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, List[str]] , SCREAMING_SNAKE_CASE__ : int = 5_1_2 , SCREAMING_SNAKE_CASE__ : int = 5_1_2 , SCREAMING_SNAKE_CASE__ : int = 5_0 , SCREAMING_SNAKE_CASE__ : float = 7.5 , SCREAMING_SNAKE_CASE__ : Optional[Union[str, List[str]]] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = 1 , SCREAMING_SNAKE_CASE__ : float = 0.0 , SCREAMING_SNAKE_CASE__ : Optional[torch.Generator] = None , SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE__ : Optional[str] = "pil" , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> List[str]: if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): a_ : Optional[int] = 1 elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): a_ : str = len(SCREAMING_SNAKE_CASE__ ) else: raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(SCREAMING_SNAKE_CASE__ )}""" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or callback_steps <= 0) ): raise ValueError( F"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" F""" {type(SCREAMING_SNAKE_CASE__ )}.""" ) # get prompt text embeddings a_ : List[Any] = self.tokenizer( SCREAMING_SNAKE_CASE__ , padding='max_length' , 
max_length=self.tokenizer.model_max_length , return_tensors='pt' , ) a_ : List[str] = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: a_ : Tuple = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( 'The following part of your input was truncated because CLIP can only handle sequences up to' F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" ) a_ : Any = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: a_ : List[str] = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method a_ , a_ , a_ : Any = text_embeddings.shape a_ : Optional[int] = text_embeddings.repeat(1 , SCREAMING_SNAKE_CASE__ , 1 ) a_ : Union[str, Any] = text_embeddings.view(bs_embed * num_images_per_prompt , SCREAMING_SNAKE_CASE__ , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. a_ : List[str] = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: a_ : List[str] if negative_prompt is None: a_ : str = [''] elif type(SCREAMING_SNAKE_CASE__ ) is not type(SCREAMING_SNAKE_CASE__ ): raise TypeError( F"""`negative_prompt` should be the same type to `prompt`, but got {type(SCREAMING_SNAKE_CASE__ )} !=""" F""" {type(SCREAMING_SNAKE_CASE__ )}.""" ) elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): a_ : Any = [negative_prompt] elif batch_size != len(SCREAMING_SNAKE_CASE__ ): raise ValueError( F"""`negative_prompt`: {negative_prompt} has batch size {len(SCREAMING_SNAKE_CASE__ )}, but `prompt`:""" F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches""" ' the batch size of `prompt`.' ) else: a_ : List[Any] = negative_prompt a_ : int = text_input_ids.shape[-1] a_ : Tuple = self.tokenizer( SCREAMING_SNAKE_CASE__ , padding='max_length' , max_length=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , return_tensors='pt' , ) a_ : Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method a_ : Optional[Any] = uncond_embeddings.shape[1] a_ : int = uncond_embeddings.repeat(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 ) a_ : Optional[int] = uncond_embeddings.view(batch_size * num_images_per_prompt , SCREAMING_SNAKE_CASE__ , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes a_ : Optional[Any] = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
a_ : Optional[int] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) a_ : Optional[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 6_4, 6_4) a_ : Optional[Any] = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps a_ : Tuple = torch.randn( SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device='cpu' , dtype=SCREAMING_SNAKE_CASE__ ).to(self.device ) a_ : Optional[Any] = torch.randn(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device='cpu' , dtype=SCREAMING_SNAKE_CASE__ ).to( self.device ) else: a_ : Union[str, Any] = torch.randn( SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=self.device , dtype=SCREAMING_SNAKE_CASE__ ) a_ : Any = torch.randn(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=self.device , dtype=SCREAMING_SNAKE_CASE__ ) else: if latents_reference.shape != latents_shape: raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) a_ : Optional[Any] = latents_reference.to(self.device ) a_ : Union[str, Any] = latents.to(self.device ) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images a_ : Dict = (latents_shape[3] - latents_shape_reference[3]) // 2 a_ : Any = (latents_shape[2] - latents_shape_reference[2]) // 2 a_ : List[Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx a_ : Any = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy a_ : Dict = 0 if dx < 0 else dx a_ : Any = 0 if dy < 0 else dy a_ : Optional[int] = max(-dx , 0 ) a_ : Optional[int] = max(-dy , 0 ) # import pdb # pdb.set_trace() a_ : Dict = latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand a_ : List[Any] = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler a_ : Optional[int] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] a_ : Union[str, Any] = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) a_ : Optional[int] = {} if accepts_eta: a_ : List[Any] = eta for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE__ ) ): # expand the latents if we are doing classifier free guidance a_ : List[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents a_ : Any = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # predict the noise residual a_ : Optional[int] = self.unet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ ).sample # perform guidance if do_classifier_free_guidance: a_ , a_ : Optional[int] = noise_pred.chunk(2 ) a_ : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 a_ : Any = self.scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) a_ : Dict = 1 / 0.18215 * latents a_ : Dict = self.vae.decode(SCREAMING_SNAKE_CASE__ ).sample a_ : str = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 a_ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if self.safety_checker is not None: a_ : int = self.feature_extractor(self.numpy_to_pil(SCREAMING_SNAKE_CASE__ ) , return_tensors='pt' ).to( self.device ) a_ , a_ : str = self.safety_checker( images=SCREAMING_SNAKE_CASE__ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) ) else: a_ : List[Any] = None if output_type == "pil": a_ : Tuple = self.numpy_to_pil(SCREAMING_SNAKE_CASE__ ) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE__ , nsfw_content_detected=SCREAMING_SNAKE_CASE__ )
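A minimal usage sketch for the seed-resize pipeline defined above. Everything below is an assumption layered on top of the dump: `SeedResizePipeline` is a hypothetical alias for the obfuscated class, the checkpoint id is only an example, and the keyword names (vae, height, generator, ...) follow the upstream Stable Diffusion signature rather than the name-mangled one shown here.

# Hypothetical sketch, not part of the original file. `SeedResizePipeline` is an
# assumed alias for the class above; keyword names assume the upstream signature.
import torch
from diffusers import StableDiffusionPipeline

base = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe = SeedResizePipeline(
    vae=base.vae,
    text_encoder=base.text_encoder,
    tokenizer=base.tokenizer,
    unet=base.unet,
    scheduler=base.scheduler,
    safety_checker=base.safety_checker,
    feature_extractor=base.feature_extractor,
)
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = pipe.to(device)

# With a fixed seed, the 64x64 reference latents are cropped/padded into the
# target-resolution latents, so a larger render keeps a similar composition.
generator = torch.Generator(device=device).manual_seed(42)
image = pipe(
    "a red fox in the snow",
    height=768,
    width=768,
    num_inference_steps=50,
    guidance_scale=7.5,
    generator=generator,
).images[0]
image.save("fox_768.png")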
32
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: 'Sunday',
    1: 'Monday',
    2: 'Tuesday',
    3: 'Wednesday',
    4: 'Thursday',
    5: 'Friday',
    6: 'Saturday',
}


def SCREAMING_SNAKE_CASE_(year: int, month: int, day: int) -> str:
    """Return the week-day name for a given date using the Doomsday algorithm."""
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    # week-day on which this year's doomsday falls
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # doomsday date for the requested month; century years are leap only when
    # divisible by 400, so the non-leap table is used when year % 400 != 0
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
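A few spot checks for the routine above; the dates were read off a calendar and are not part of the original file, and the obfuscated function name is reused as-is.

# Hypothetical sanity checks for the Doomsday helper above.
assert SCREAMING_SNAKE_CASE_(2003, 1, 15) == "Wednesday"
assert SCREAMING_SNAKE_CASE_(2020, 10, 24) == "Saturday"
assert SCREAMING_SNAKE_CASE_(2000, 3, 14) == "Tuesday"  # exercises the century leap-year branch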
32
1
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class SCREAMING_SNAKE_CASE__ ( lowercase__ , unittest.TestCase ): snake_case__ : Optional[Any] = TextToVideoSDPipeline snake_case__ : Optional[int] = TEXT_TO_IMAGE_PARAMS snake_case__ : str = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. snake_case__ : Optional[Any] = frozenset( [ '''num_inference_steps''', '''generator''', '''latents''', '''return_dict''', '''callback''', '''callback_steps''', ] ) def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: torch.manual_seed(0 ) a_ : Optional[int] = UNetaDConditionModel( block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=3_2 , attention_head_dim=4 , ) a_ : int = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=SCREAMING_SNAKE_CASE__ , set_alpha_to_one=SCREAMING_SNAKE_CASE__ , ) torch.manual_seed(0 ) a_ : int = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) a_ : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , ) a_ : Dict = CLIPTextModel(SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) a_ : Union[str, Any] = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, } return components def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any]=0 ) -> List[str]: if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ): a_ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) else: a_ : Optional[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) a_ : int = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'pt', } return inputs def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: a_ : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator a_ : Dict = self.get_dummy_components() a_ : str = TextToVideoSDPipeline(**SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = sd_pipe.to(SCREAMING_SNAKE_CASE__ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Tuple = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) a_ : Dict = 'np' a_ : Dict = sd_pipe(**SCREAMING_SNAKE_CASE__ ).frames a_ : int = 
frames[0][-3:, -3:, -1] assert frames[0].shape == (6_4, 6_4, 3) a_ : Union[str, Any] = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: self._test_attention_slicing_forward_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE__ , expected_max_diff=3E-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def SCREAMING_SNAKE_CASE ( self : Any ) -> str: self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE__ , expected_max_diff=1E-2 ) @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: pass @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]: pass @unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' ) def SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]: pass def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: return super().test_progress_bar() @slow @skip_mps class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: a_ : str = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy' ) a_ : Any = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' ) a_ : Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) a_ : Optional[Any] = pipe.to('cuda' ) a_ : Any = 'Spiderman is surfing' a_ : List[Any] = torch.Generator(device='cpu' ).manual_seed(0 ) a_ : Optional[Any] = pipe(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2_5 , output_type='pt' ).frames a_ : str = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5E-2 def SCREAMING_SNAKE_CASE ( self : Any ) -> Any: a_ : Dict = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy' ) a_ : Tuple = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' ) a_ : Tuple = pipe.to('cuda' ) a_ : Any = 'Spiderman is surfing' a_ : List[str] = torch.Generator(device='cpu' ).manual_seed(0 ) a_ : List[Any] = pipe(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type='pt' ).frames a_ : List[str] = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5E-2
32
import math import flax.linen as nn import jax.numpy as jnp def SCREAMING_SNAKE_CASE_ ( __A : jnp.ndarray , __A : int , __A : float = 1 , __A : float = 1 , __A : float = 1.0e4 , __A : bool = False , __A : float = 1.0 , ) -> jnp.ndarray: """simple docstring""" assert timesteps.ndim == 1, "Timesteps should be a 1d-array" assert embedding_dim % 2 == 0, F"""Embedding dimension {embedding_dim} should be even""" a_ : int = float(embedding_dim // 2 ) a_ : str = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift) a_ : Optional[int] = min_timescale * jnp.exp(jnp.arange(__A , dtype=jnp.floataa ) * -log_timescale_increment ) a_ : Optional[int] = jnp.expand_dims(__A , 1 ) * jnp.expand_dims(__A , 0 ) # scale embeddings a_ : str = scale * emb if flip_sin_to_cos: a_ : str = jnp.concatenate([jnp.cos(__A ), jnp.sin(__A )] , axis=1 ) else: a_ : Any = jnp.concatenate([jnp.sin(__A ), jnp.cos(__A )] , axis=1 ) a_ : Optional[int] = jnp.reshape(__A , [jnp.shape(__A )[0], embedding_dim] ) return signal class SCREAMING_SNAKE_CASE__ ( nn.Module ): snake_case__ : int = 32 snake_case__ : jnp.dtype = jnp.floataa @nn.compact def __call__( self : Tuple , SCREAMING_SNAKE_CASE__ : str ) -> List[str]: a_ : Optional[Any] = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='linear_1' )(SCREAMING_SNAKE_CASE__ ) a_ : Tuple = nn.silu(SCREAMING_SNAKE_CASE__ ) a_ : str = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='linear_2' )(SCREAMING_SNAKE_CASE__ ) return temb class SCREAMING_SNAKE_CASE__ ( nn.Module ): snake_case__ : int = 32 snake_case__ : bool = False snake_case__ : float = 1 @nn.compact def __call__( self : str , SCREAMING_SNAKE_CASE__ : int ) -> Tuple: return get_sinusoidal_embeddings( SCREAMING_SNAKE_CASE__ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
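A quick sketch of how the sinusoidal helper above is meant to be driven. The call keeps the obfuscated function name, passes arguments positionally (the dump mangles the parameter names), and assumes the helper's internals behave as its signature, asserts, and final reshape suggest; in the class at the end of the file it is referenced as `get_sinusoidal_embeddings`.

# Hypothetical call into the sinusoidal-embedding helper above; the expected
# output shape follows the final reshape to [num_timesteps, embedding_dim].
import jax.numpy as jnp

timesteps = jnp.array([0.0, 1.0, 10.0, 100.0])
emb = SCREAMING_SNAKE_CASE_(timesteps, 32)  # positional: (timesteps, embedding_dim)
assert emb.shape == (4, 32)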
32
1
import re import tempfile from pathlib import Path import pytest import yaml from datasets.utils.readme import ReadMe # @pytest.fixture # def example_yaml_structure(): UpperCAmelCase_ : Any = yaml.safe_load( '\\nname: ""\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: "Dataset Card for X" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: "Table of Contents"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Dataset Description"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: "Dataset Summary"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Supported Tasks and Leaderboards"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n' ) UpperCAmelCase_ : Optional[Any] = { 'name': 'root', 'text': '', 'is_empty_text': True, 'subsections': [ { 'name': 'Dataset Card for My Dataset', 'text': '', 'is_empty_text': True, 'subsections': [ {'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []}, { 'name': 'Dataset Description', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': [ { 'name': 'Dataset Summary', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': [], }, { 'name': 'Supported Tasks and Leaderboards', 'text': '', 'is_empty_text': True, 'subsections': [], }, {'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []}, ], }, ], } ], } UpperCAmelCase_ : Dict = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' UpperCAmelCase_ : Dict = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' UpperCAmelCase_ : Union[str, Any] = { 'name': 'root', 'text': '', 'is_empty_text': True, 'subsections': [ { 'name': 'Dataset Card for My Dataset', 'text': '', 'is_empty_text': True, 'subsections': [ {'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []}, { 'name': 'Dataset Description', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': [ { 'name': 'Dataset Summary', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': [ { 'name': 'Extra Ignored Subsection', 'text': '', 'is_empty_text': True, 'subsections': [], } ], }, { 'name': 'Supported Tasks and Leaderboards', 'text': '', 'is_empty_text': True, 'subsections': [], }, {'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []}, ], }, ], } ], } UpperCAmelCase_ : int = '\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' UpperCAmelCase_ : Dict = ( 'The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.' 
) UpperCAmelCase_ : Dict = '\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' UpperCAmelCase_ : Optional[int] = ( 'The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.' ) UpperCAmelCase_ : int = '\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' UpperCAmelCase_ : List[Any] = 'The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.' UpperCAmelCase_ : int = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' UpperCAmelCase_ : Union[str, Any] = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).' UpperCAmelCase_ : List[str] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n' UpperCAmelCase_ : Union[str, Any] = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.' UpperCAmelCase_ : List[str] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n' UpperCAmelCase_ : str = 'The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.' UpperCAmelCase_ : Any = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n' UpperCAmelCase_ : str = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.' UpperCAmelCase_ : Dict = '\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' UpperCAmelCase_ : int = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.' 
UpperCAmelCase_ : Dict = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n' UpperCAmelCase_ : Any = 'The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.' UpperCAmelCase_ : Any = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' UpperCAmelCase_ : Optional[Any] = 'The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.' UpperCAmelCase_ : Optional[int] = '' UpperCAmelCase_ : Union[str, Any] = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.' UpperCAmelCase_ : Any = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' UpperCAmelCase_ : List[Any] = 'The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.' 
@pytest.mark.parametrize( 'readme_md, expected_dict' , [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] , ) def SCREAMING_SNAKE_CASE_ ( __A : int , __A : Optional[int] ) -> Any: """simple docstring""" assert ReadMe.from_string(__A , __A ).to_dict() == expected_dict @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] , ) def SCREAMING_SNAKE_CASE_ ( __A : Optional[int] , __A : Union[str, Any] ) -> int: """simple docstring""" with pytest.raises(__A , match=re.escape(expected_error.format(path='root' ) ) ): a_ : List[str] = ReadMe.from_string(__A , __A ) readme.validate() @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] , ) def SCREAMING_SNAKE_CASE_ ( __A : Any , __A : Optional[int] ) -> Dict: """simple docstring""" with pytest.raises(__A , match=re.escape(expected_error.format(path='root' ) ) ): ReadMe.from_string(__A , __A ) @pytest.mark.parametrize( 'readme_md,' , [ (README_MULTIPLE_SAME_HEADING_1), ] , ) def SCREAMING_SNAKE_CASE_ ( __A : str ) -> Optional[Any]: """simple docstring""" ReadMe.from_string(__A , __A , suppress_parsing_errors=__A ) @pytest.mark.parametrize( 'readme_md, expected_dict' , [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] , ) def SCREAMING_SNAKE_CASE_ ( __A : int , __A : List[str] ) -> Any: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: a_ : Any = Path(__A ) / 'README.md' with open(__A , 'w+' ) as readme_file: readme_file.write(__A ) a_ : List[str] = ReadMe.from_readme(__A , __A ).to_dict() assert out["name"] == path assert out["text"] == "" assert out["is_empty_text"] assert out["subsections"] == expected_dict["subsections"] @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] , ) def SCREAMING_SNAKE_CASE_ ( __A : Dict , __A : str ) -> Optional[Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: a_ : List[str] = Path(__A ) / 'README.md' with open(__A , 'w+' ) as readme_file: readme_file.write(__A ) a_ : Union[str, Any] = expected_error.format(path=__A ) with 
pytest.raises(__A , match=re.escape(__A ) ): a_ : Union[str, Any] = ReadMe.from_readme(__A , __A ) readme.validate() @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] , ) def SCREAMING_SNAKE_CASE_ ( __A : int , __A : int ) -> List[str]: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: a_ : Union[str, Any] = Path(__A ) / 'README.md' with open(__A , 'w+' ) as readme_file: readme_file.write(__A ) a_ : Optional[int] = expected_error.format(path=__A ) with pytest.raises(__A , match=re.escape(__A ) ): ReadMe.from_readme(__A , __A ) @pytest.mark.parametrize( 'readme_md,' , [ (README_MULTIPLE_SAME_HEADING_1), ] , ) def SCREAMING_SNAKE_CASE_ ( __A : Optional[Any] ) -> Optional[Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: a_ : Tuple = Path(__A ) / 'README.md' with open(__A , 'w+' ) as readme_file: readme_file.write(__A ) ReadMe.from_readme(__A , __A , suppress_parsing_errors=__A )
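Outside the test harness, the API these tests exercise is used roughly as below. `example_yaml_structure` stands in for the `yaml.safe_load(...)` result defined at the top of this file (its name is obfuscated in this dump), so treat this as a sketch rather than a verbatim snippet.

# Sketch of the ReadMe API under test; `example_yaml_structure` is an assumed
# name for the section-structure loaded at the top of this file.
from datasets.utils.readme import ReadMe

readme = ReadMe.from_string(README_CORRECT, example_yaml_structure)
readme.validate()  # raises if a required section is missing or empty
print(readme.to_dict()["subsections"][0]["name"])  # "Dataset Card for My Dataset"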
32
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__) UpperCAmelCase_ : List[str] = OrderedDict( [ # Base model mapping ('albert', 'FlaxAlbertModel'), ('bart', 'FlaxBartModel'), ('beit', 'FlaxBeitModel'), ('bert', 'FlaxBertModel'), ('big_bird', 'FlaxBigBirdModel'), ('blenderbot', 'FlaxBlenderbotModel'), ('blenderbot-small', 'FlaxBlenderbotSmallModel'), ('clip', 'FlaxCLIPModel'), ('distilbert', 'FlaxDistilBertModel'), ('electra', 'FlaxElectraModel'), ('gpt-sw3', 'FlaxGPT2Model'), ('gpt2', 'FlaxGPT2Model'), ('gpt_neo', 'FlaxGPTNeoModel'), ('gptj', 'FlaxGPTJModel'), ('longt5', 'FlaxLongT5Model'), ('marian', 'FlaxMarianModel'), ('mbart', 'FlaxMBartModel'), ('mt5', 'FlaxMT5Model'), ('opt', 'FlaxOPTModel'), ('pegasus', 'FlaxPegasusModel'), ('regnet', 'FlaxRegNetModel'), ('resnet', 'FlaxResNetModel'), ('roberta', 'FlaxRobertaModel'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'), ('roformer', 'FlaxRoFormerModel'), ('t5', 'FlaxT5Model'), ('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'), ('vit', 'FlaxViTModel'), ('wav2vec2', 'FlaxWav2Vec2Model'), ('whisper', 'FlaxWhisperModel'), ('xglm', 'FlaxXGLMModel'), ('xlm-roberta', 'FlaxXLMRobertaModel'), ] ) UpperCAmelCase_ : str = OrderedDict( [ # Model for pre-training mapping ('albert', 'FlaxAlbertForPreTraining'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForPreTraining'), ('big_bird', 'FlaxBigBirdForPreTraining'), ('electra', 'FlaxElectraForPreTraining'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('t5', 'FlaxT5ForConditionalGeneration'), ('wav2vec2', 'FlaxWav2Vec2ForPreTraining'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) UpperCAmelCase_ : Dict = OrderedDict( [ # Model for Masked LM mapping ('albert', 'FlaxAlbertForMaskedLM'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForMaskedLM'), ('big_bird', 'FlaxBigBirdForMaskedLM'), ('distilbert', 'FlaxDistilBertForMaskedLM'), ('electra', 'FlaxElectraForMaskedLM'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) UpperCAmelCase_ : Optional[Any] = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ('bart', 'FlaxBartForConditionalGeneration'), ('blenderbot', 'FlaxBlenderbotForConditionalGeneration'), ('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'), ('encoder-decoder', 'FlaxEncoderDecoderModel'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('marian', 'FlaxMarianMTModel'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('pegasus', 'FlaxPegasusForConditionalGeneration'), ('t5', 'FlaxT5ForConditionalGeneration'), ] ) UpperCAmelCase_ : List[str] = OrderedDict( [ # Model for Image-classsification ('beit', 'FlaxBeitForImageClassification'), ('regnet', 'FlaxRegNetForImageClassification'), ('resnet', 'FlaxResNetForImageClassification'), ('vit', 'FlaxViTForImageClassification'), ] ) 
UpperCAmelCase_ : int = OrderedDict( [ ('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'), ] ) UpperCAmelCase_ : List[str] = OrderedDict( [ # Model for Causal LM mapping ('bart', 'FlaxBartForCausalLM'), ('bert', 'FlaxBertForCausalLM'), ('big_bird', 'FlaxBigBirdForCausalLM'), ('electra', 'FlaxElectraForCausalLM'), ('gpt-sw3', 'FlaxGPT2LMHeadModel'), ('gpt2', 'FlaxGPT2LMHeadModel'), ('gpt_neo', 'FlaxGPTNeoForCausalLM'), ('gptj', 'FlaxGPTJForCausalLM'), ('opt', 'FlaxOPTForCausalLM'), ('roberta', 'FlaxRobertaForCausalLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'), ('xglm', 'FlaxXGLMForCausalLM'), ('xlm-roberta', 'FlaxXLMRobertaForCausalLM'), ] ) UpperCAmelCase_ : List[str] = OrderedDict( [ # Model for Sequence Classification mapping ('albert', 'FlaxAlbertForSequenceClassification'), ('bart', 'FlaxBartForSequenceClassification'), ('bert', 'FlaxBertForSequenceClassification'), ('big_bird', 'FlaxBigBirdForSequenceClassification'), ('distilbert', 'FlaxDistilBertForSequenceClassification'), ('electra', 'FlaxElectraForSequenceClassification'), ('mbart', 'FlaxMBartForSequenceClassification'), ('roberta', 'FlaxRobertaForSequenceClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'), ('roformer', 'FlaxRoFormerForSequenceClassification'), ('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'), ] ) UpperCAmelCase_ : List[str] = OrderedDict( [ # Model for Question Answering mapping ('albert', 'FlaxAlbertForQuestionAnswering'), ('bart', 'FlaxBartForQuestionAnswering'), ('bert', 'FlaxBertForQuestionAnswering'), ('big_bird', 'FlaxBigBirdForQuestionAnswering'), ('distilbert', 'FlaxDistilBertForQuestionAnswering'), ('electra', 'FlaxElectraForQuestionAnswering'), ('mbart', 'FlaxMBartForQuestionAnswering'), ('roberta', 'FlaxRobertaForQuestionAnswering'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'), ('roformer', 'FlaxRoFormerForQuestionAnswering'), ('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'), ] ) UpperCAmelCase_ : Union[str, Any] = OrderedDict( [ # Model for Token Classification mapping ('albert', 'FlaxAlbertForTokenClassification'), ('bert', 'FlaxBertForTokenClassification'), ('big_bird', 'FlaxBigBirdForTokenClassification'), ('distilbert', 'FlaxDistilBertForTokenClassification'), ('electra', 'FlaxElectraForTokenClassification'), ('roberta', 'FlaxRobertaForTokenClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'), ('roformer', 'FlaxRoFormerForTokenClassification'), ('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'), ] ) UpperCAmelCase_ : Dict = OrderedDict( [ # Model for Multiple Choice mapping ('albert', 'FlaxAlbertForMultipleChoice'), ('bert', 'FlaxBertForMultipleChoice'), ('big_bird', 'FlaxBigBirdForMultipleChoice'), ('distilbert', 'FlaxDistilBertForMultipleChoice'), ('electra', 'FlaxElectraForMultipleChoice'), ('roberta', 'FlaxRobertaForMultipleChoice'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'), ('roformer', 'FlaxRoFormerForMultipleChoice'), ('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'), ] ) UpperCAmelCase_ : List[str] = OrderedDict( [ ('bert', 'FlaxBertForNextSentencePrediction'), ] ) UpperCAmelCase_ : Dict = OrderedDict( [ ('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ] ) UpperCAmelCase_ : Union[str, Any] = OrderedDict( [ ('whisper', 'FlaxWhisperForAudioClassification'), ] ) UpperCAmelCase_ : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, 
FLAX_MODEL_MAPPING_NAMES) UpperCAmelCase_ : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) UpperCAmelCase_ : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) UpperCAmelCase_ : List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) UpperCAmelCase_ : int = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) UpperCAmelCase_ : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) UpperCAmelCase_ : Dict = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ : Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) UpperCAmelCase_ : List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ : int = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) UpperCAmelCase_ : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) UpperCAmelCase_ : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) UpperCAmelCase_ : Optional[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : List[Any] = FLAX_MODEL_MAPPING UpperCAmelCase_ : Tuple = auto_class_update(FlaxAutoModel) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Any = FLAX_MODEL_FOR_PRETRAINING_MAPPING UpperCAmelCase_ : Optional[Any] = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : List[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING UpperCAmelCase_ : Optional[Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Optional[Any] = FLAX_MODEL_FOR_MASKED_LM_MAPPING UpperCAmelCase_ : Union[str, Any] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Tuple = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING UpperCAmelCase_ : Optional[int] = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base' ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Tuple = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING UpperCAmelCase_ : Optional[Any] = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc='sequence classification' ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Tuple = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING UpperCAmelCase_ : str = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : List[str] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING UpperCAmelCase_ : Tuple = auto_class_update( FlaxAutoModelForTokenClassification, head_doc='token classification' ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Dict = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING UpperCAmelCase_ : int = 
auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Optional[int] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING UpperCAmelCase_ : Dict = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction' ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING UpperCAmelCase_ : str = auto_class_update( FlaxAutoModelForImageClassification, head_doc='image classification' ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Optional[Any] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase_ : Tuple = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Optional[int] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING UpperCAmelCase_ : Union[str, Any] = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling' )
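For reference, a typical downstream use of the auto classes defined above; the checkpoint id is only an example, and the `bert` config routes through the base-model mapping to `FlaxBertModel`.

# Example of routing a checkpoint through the mappings above.
import jax.numpy as jnp
from transformers import AutoTokenizer, FlaxAutoModel

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = FlaxAutoModel.from_pretrained("bert-base-uncased")  # resolves to FlaxBertModel
inputs = tokenizer("Auto classes map a config type to a concrete Flax model.", return_tensors="np")
outputs = model(**inputs)
print(type(model).__name__, outputs.last_hidden_state.shape)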
32
1
import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def SCREAMING_SNAKE_CASE_ ( __A : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" a_ : str = SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: a_ : Tuple = 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: a_ : int = 4 a_ : str = 48 a_ : Union[str, Any] = 'pixelshuffle_aux' elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: a_ : List[Any] = [6, 6, 6, 6] a_ : Tuple = 60 a_ : Union[str, Any] = [6, 6, 6, 6] a_ : Optional[int] = 'pixelshuffledirect' elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: a_ : Tuple = 4 a_ : Optional[int] = 'nearest+conv' elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: a_ : List[str] = 1 a_ : Optional[int] = 1 a_ : int = 1_26 a_ : Optional[Any] = 7 a_ : Optional[int] = 255.0 a_ : Tuple = '' return config def SCREAMING_SNAKE_CASE_ ( __A : Tuple , __A : Optional[int] ) -> List[str]: """simple docstring""" if "patch_embed.proj" in name and "layers" not in name: a_ : List[Any] = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: a_ : List[Any] = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' ) if "layers" in name: a_ : Any = name.replace('layers' , 'encoder.stages' ) if "residual_group.blocks" in name: a_ : Tuple = name.replace('residual_group.blocks' , 'layers' ) if "attn.proj" in name: a_ : Union[str, Any] = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: a_ : Tuple = name.replace('attn' , 'attention.self' ) if "norm1" in name: a_ : Optional[int] = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: a_ : Union[str, Any] = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: a_ : Union[str, Any] = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: a_ : List[str] = name.replace('mlp.fc2' , 'output.dense' ) if "q_bias" in name: a_ : str = name.replace('q_bias' , 'query.bias' ) if "k_bias" in name: a_ : Optional[int] = name.replace('k_bias' , 'key.bias' ) if "v_bias" in name: a_ : Optional[int] = name.replace('v_bias' , 'value.bias' ) if "cpb_mlp" in name: a_ : List[str] = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' ) if "patch_embed.proj" in name: a_ : List[Any] = name.replace('patch_embed.proj' , 'patch_embed.projection' ) if name == "norm.weight": a_ : Union[str, Any] = 'layernorm.weight' if name == "norm.bias": a_ : Optional[int] = 'layernorm.bias' if "conv_first" in name: a_ : Tuple = name.replace('conv_first' , 'first_convolution' ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: a_ : Optional[int] = name.replace('conv_last' , 'final_convolution' ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: a_ : Union[str, Any] = name.replace('conv_before_upsample.0' , 'conv_before_upsample' ) if "upsample.0" in name: a_ : Union[str, Any] = name.replace('upsample.0' , 'upsample.convolution_0' ) if "upsample.2" in name: a_ : Union[str, Any] = name.replace('upsample.2' , 'upsample.convolution_1' ) a_ : Optional[int] = 'upsample.' 
+ name elif config.upsampler == "pixelshuffledirect": a_ : Union[str, Any] = name.replace('upsample.0.weight' , 'upsample.conv.weight' ) a_ : Union[str, Any] = name.replace('upsample.0.bias' , 'upsample.conv.bias' ) else: pass else: a_ : List[str] = 'swin2sr.' + name return name def SCREAMING_SNAKE_CASE_ ( __A : Union[str, Any] , __A : List[str] ) -> Dict: """simple docstring""" for key in orig_state_dict.copy().keys(): a_ : List[Any] = orig_state_dict.pop(__A ) if "qkv" in key: a_ : List[Any] = key.split('.' ) a_ : Optional[Any] = int(key_split[1] ) a_ : Optional[Any] = int(key_split[4] ) a_ : Tuple = config.embed_dim if "weight" in key: a_ : Union[str, Any] = val[:dim, :] a_ : Tuple = val[dim : dim * 2, :] a_ : Union[str, Any] = val[-dim:, :] else: a_ : Tuple = val[:dim] a_ : Optional[Any] = val[dim : dim * 2] a_ : Any = val[-dim:] pass else: a_ : Union[str, Any] = val return orig_state_dict def SCREAMING_SNAKE_CASE_ ( __A : Tuple , __A : Optional[int] , __A : List[Any] ) -> Optional[int]: """simple docstring""" a_ : Any = get_config(__A ) a_ : Any = SwinaSRForImageSuperResolution(__A ) model.eval() a_ : List[str] = torch.hub.load_state_dict_from_url(__A , map_location='cpu' ) a_ : List[Any] = convert_state_dict(__A , __A ) a_ , a_ : Tuple = model.load_state_dict(__A , strict=__A ) if len(__A ) > 0: raise ValueError('Missing keys when converting: {}'.format(__A ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(F"""Unexpected key {key} in state_dict""" ) # verify values a_ : Union[str, Any] = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true' a_ : Tuple = Image.open(requests.get(__A , stream=__A ).raw ).convert('RGB' ) a_ : Union[str, Any] = SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values a_ : Any = 1_26 if 'Jpeg' in checkpoint_url else 2_56 a_ : Optional[int] = Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) a_ : str = transforms(__A ).unsqueeze(0 ) if config.num_channels == 1: a_ : int = pixel_values[:, 0, :, :].unsqueeze(1 ) a_ : int = model(__A ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: a_ : Optional[int] = torch.Size([1, 3, 5_12, 5_12] ) a_ : str = torch.tensor( [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: a_ : Dict = torch.Size([1, 3, 10_24, 10_24] ) a_ : Union[str, Any] = torch.tensor( [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here a_ : Union[str, Any] = torch.Size([1, 3, 10_24, 10_24] ) a_ : int = torch.tensor( [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: a_ : Union[str, Any] = torch.Size([1, 3, 5_12, 5_12] ) a_ : Optional[int] = torch.tensor( [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: a_ : Any = torch.Size([1, 3, 10_24, 10_24] ) a_ : Tuple = torch.tensor( [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] ) assert ( outputs.reconstruction.shape == expected_shape ), F"""Shape of reconstruction should be {expected_shape}, but is 
{outputs.reconstruction.shape}""" assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , __A , atol=1e-3 ) print('Looks ok!' ) a_ : int = { 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': ( 'swin2SR-classical-sr-x2-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': ( 'swin2SR-classical-sr-x4-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': ( 'swin2SR-compressed-sr-x4-48' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': ( 'swin2SR-lightweight-x2-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': ( 'swin2SR-realworld-sr-x4-64-bsrgan-psnr' ), } a_ : List[str] = url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__A ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) processor.save_pretrained(__A ) if push_to_hub: model.push_to_hub(F"""caidas/{model_name}""" ) processor.push_to_hub(F"""caidas/{model_name}""" ) if __name__ == "__main__": UpperCAmelCase_ : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth', type=str, help='URL of the original Swin2SR checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the converted model to the hub.') UpperCAmelCase_ : List[str] = parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
32
import json import os import unittest from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import ( VOCAB_FILES_NAMES, GPTSanJapaneseTokenizer, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class SCREAMING_SNAKE_CASE__ ( lowercase__ , unittest.TestCase ): snake_case__ : Any = GPTSanJapaneseTokenizer snake_case__ : Tuple = False snake_case__ : str = {'''do_clean_text''': False, '''add_prefix_space''': False} def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str: super().setUp() # fmt: off a_ : Union[str, Any] = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>'] # fmt: on a_ : int = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀 a_ : List[Any] = {'unk_token': '<unk>'} a_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) a_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) with open(self.emoji_file , 'w' ) as emoji_writer: emoji_writer.write(json.dumps(SCREAMING_SNAKE_CASE__ ) ) def SCREAMING_SNAKE_CASE ( self : List[str] , **SCREAMING_SNAKE_CASE__ : Tuple ) -> int: kwargs.update(self.special_tokens_map ) return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> int: a_ : Optional[int] = 'こんにちは、世界。 \nこんばんは、㔺界。😀' a_ : List[str] = 'こんにちは、世界。 \nこんばんは、世界。😀' return input_text, output_text def SCREAMING_SNAKE_CASE ( self : str , SCREAMING_SNAKE_CASE__ : int ) -> Dict: a_ , a_ : Union[str, Any] = self.get_input_output_texts(SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) a_ : Dict = tokenizer.decode(SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ ) return text, ids def SCREAMING_SNAKE_CASE ( self : Dict ) -> int: pass # TODO add if relevant def SCREAMING_SNAKE_CASE ( self : Dict ) -> Any: pass # TODO add if relevant def SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple: pass # TODO add if relevant def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]: a_ : List[str] = self.get_tokenizer() # Testing tokenization a_ : List[Any] = 'こんにちは、世界。 こんばんは、㔺界。' a_ : Optional[int] = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。'] a_ : Dict = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Testing conversion to ids without special tokens a_ : Tuple = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] a_ : List[Any] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Testing conversion to ids with special tokens a_ : int = tokens + [tokenizer.unk_token] a_ : int = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 1_9] a_ : Tuple = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict: a_ : Union[str, Any] = self.get_tokenizer() # Testing tokenization a_ : Dict = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。' a_ : List[Any] = 
'こんにちは、、、、世界。こんばんは、、、、世界。' a_ : Any = tokenizer.encode(SCREAMING_SNAKE_CASE__ ) a_ : Optional[Any] = tokenizer.decode(SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @slow def SCREAMING_SNAKE_CASE ( self : str ) -> Dict: a_ : Tuple = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) # Testing tokenization a_ : List[Any] = 'こんにちは、世界。' a_ : int = 'こんばんは、㔺界。😀' a_ : Dict = 'こんにちは、世界。こんばんは、世界。😀' a_ : Optional[int] = tokenizer.encode(prefix_text + input_text ) a_ : Any = tokenizer.encode('' , prefix_text=prefix_text + input_text ) a_ : Union[str, Any] = tokenizer.encode(SCREAMING_SNAKE_CASE__ , prefix_text=SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = tokenizer.decode(SCREAMING_SNAKE_CASE__ ) a_ : Tuple = tokenizer.decode(SCREAMING_SNAKE_CASE__ ) a_ : str = tokenizer.decode(SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @slow def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: a_ : Tuple = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) # Testing tokenization a_ : str = 'こんにちは、世界。' a_ : List[str] = 'こんばんは、㔺界。😀' a_ : str = len(tokenizer.encode(SCREAMING_SNAKE_CASE__ ) ) - 2 a_ : Tuple = len(tokenizer.encode(SCREAMING_SNAKE_CASE__ ) ) - 2 a_ : Optional[Any] = [1] + [0] * (len_prefix + len_text + 1) a_ : Optional[Any] = [1] * (len_prefix + len_text + 1) + [0] a_ : Tuple = [1] + [1] * (len_prefix) + [0] * (len_text + 1) a_ : List[str] = tokenizer(prefix_text + input_text ).token_type_ids a_ : Union[str, Any] = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids a_ : Any = tokenizer(SCREAMING_SNAKE_CASE__ , prefix_text=SCREAMING_SNAKE_CASE__ ).token_type_ids self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @slow def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: a_ : str = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) a_ : Optional[int] = tokenizer.encode('あンいワ' ) a_ : Dict = tokenizer.encode('' , prefix_text='あンいワ' ) a_ : Dict = tokenizer.encode('いワ' , prefix_text='あン' ) self.assertEqual(tokenizer.decode(SCREAMING_SNAKE_CASE__ ) , tokenizer.decode(SCREAMING_SNAKE_CASE__ ) ) self.assertEqual(tokenizer.decode(SCREAMING_SNAKE_CASE__ ) , tokenizer.decode(SCREAMING_SNAKE_CASE__ ) ) self.assertNotEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertNotEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token @slow def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: a_ : List[str] = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) a_ : Optional[Any] = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']] a_ : List[str] = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ ) a_ : Dict = tokenizer.batch_encode_plus(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ ) # fmt: off a_ : List[Any] = [[3_5_9_9_3, 8_6_4_0, 2_5_9_4_8, 3_5_9_9_8, 3_0_6_4_7, 3_5_6_7_5, 3_5_9_9_9, 3_5_9_9_9], [3_5_9_9_3, 1_0_3_8_2, 9_8_6_8, 3_5_9_9_8, 3_0_6_4_6, 9_4_5_9, 3_0_6_4_6, 3_5_6_7_5]] a_ : Any = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]] a_ : List[Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 
1]] # fmt: on self.assertListEqual(x_token.input_ids , SCREAMING_SNAKE_CASE__ ) self.assertListEqual(x_token.token_type_ids , SCREAMING_SNAKE_CASE__ ) self.assertListEqual(x_token.attention_mask , SCREAMING_SNAKE_CASE__ ) self.assertListEqual(x_token_a.input_ids , SCREAMING_SNAKE_CASE__ ) self.assertListEqual(x_token_a.token_type_ids , SCREAMING_SNAKE_CASE__ ) self.assertListEqual(x_token_a.attention_mask , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: # Intentionally convert some words to accommodate character fluctuations unique to Japanese pass def SCREAMING_SNAKE_CASE ( self : Dict ) -> int: # tokenizer has no padding token pass
32
1
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ : str = logging.get_logger(__name__) UpperCAmelCase_ : Optional[int] = { 'google/realm-cc-news-pretrained-embedder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-encoder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-scorer': ( 'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-openqa': ( 'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json' ), 'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json', 'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json', 'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json', 'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json', # See all REALM models at https://huggingface.co/models?filter=realm } class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Optional[int] = '''realm''' def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Any=3_0_5_2_2 , SCREAMING_SNAKE_CASE__ : int=7_6_8 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2_8 , SCREAMING_SNAKE_CASE__ : Optional[int]=1_2 , SCREAMING_SNAKE_CASE__ : str=1_2 , SCREAMING_SNAKE_CASE__ : int=8 , SCREAMING_SNAKE_CASE__ : int=3_0_7_2 , SCREAMING_SNAKE_CASE__ : Optional[int]="gelu_new" , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=5_1_2 , SCREAMING_SNAKE_CASE__ : List[Any]=2 , SCREAMING_SNAKE_CASE__ : Dict=0.02 , SCREAMING_SNAKE_CASE__ : List[Any]=1E-12 , SCREAMING_SNAKE_CASE__ : List[str]=2_5_6 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_0 , SCREAMING_SNAKE_CASE__ : int=1E-3 , SCREAMING_SNAKE_CASE__ : str=5 , SCREAMING_SNAKE_CASE__ : Any=3_2_0 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1_3_3_5_3_7_1_8 , SCREAMING_SNAKE_CASE__ : Dict=5_0_0_0 , SCREAMING_SNAKE_CASE__ : int=1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0 , SCREAMING_SNAKE_CASE__ : Tuple=2 , **SCREAMING_SNAKE_CASE__ : Dict , ) -> Optional[Any]: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) # Common config a_ : Optional[int] = vocab_size a_ : List[Any] = max_position_embeddings a_ : Dict = hidden_size a_ : List[str] = retriever_proj_size a_ : Optional[int] = num_hidden_layers a_ : Optional[Any] = num_attention_heads a_ : Dict = num_candidates a_ : Any = intermediate_size a_ : Optional[int] = hidden_act a_ : Optional[Any] = hidden_dropout_prob a_ : int = attention_probs_dropout_prob a_ : List[Any] = initializer_range a_ : str = type_vocab_size a_ : Any = layer_norm_eps # Reader config a_ : str = span_hidden_size a_ : List[Any] = max_span_width a_ : Tuple = reader_layer_norm_eps a_ : int = reader_beam_size a_ : List[Any] = reader_seq_len # Retrieval config a_ : int = num_block_records a_ : int = searcher_beam_size
32
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Union[str, Any] = ['''pixel_values'''] def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, int]] = None , SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Union[int, float] = 1 / 2_5_5 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , **SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> None: super().__init__(**SCREAMING_SNAKE_CASE__ ) a_ : str = size if size is not None else {'shortest_edge': 2_5_6} a_ : Any = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ ) a_ : Dict = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4} a_ : Optional[int] = get_size_dict(SCREAMING_SNAKE_CASE__ ) a_ : List[str] = do_resize a_ : Dict = size a_ : Optional[Any] = resample a_ : Optional[int] = do_center_crop a_ : Dict = crop_size a_ : int = do_rescale a_ : int = rescale_factor a_ : Tuple = do_normalize a_ : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN a_ : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Dict[str, int] , SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> np.ndarray: a_ : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. 
Got {size.keys()}""" ) a_ : Tuple = get_resize_output_image_size(SCREAMING_SNAKE_CASE__ , size=size['shortest_edge'] , default_to_square=SCREAMING_SNAKE_CASE__ ) return resize(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Dict[str, int] , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> np.ndarray: a_ : str = get_size_dict(SCREAMING_SNAKE_CASE__ ) return center_crop(SCREAMING_SNAKE_CASE__ , size=(size['height'], size['width']) , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> np.ndarray: return rescale(SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Union[float, List[float]] , SCREAMING_SNAKE_CASE__ : Union[float, List[float]] , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : List[str] , ) -> np.ndarray: return normalize(SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : ImageInput , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : PILImageResampling = None , SCREAMING_SNAKE_CASE__ : bool = None , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[float] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> Union[str, Any]: a_ : List[str] = do_resize if do_resize is not None else self.do_resize a_ : Dict = size if size is not None else self.size a_ : Dict = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = resample if resample is not None else self.resample a_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop a_ : int = crop_size if crop_size is not None else self.crop_size a_ : Optional[int] = get_size_dict(SCREAMING_SNAKE_CASE__ ) a_ : Dict = do_rescale if do_rescale is not None else self.do_rescale a_ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor a_ : Any = do_normalize if do_normalize is not None else self.do_normalize a_ : str = image_mean if image_mean is not None else self.image_mean a_ : Dict = image_std if image_std is not None else self.image_std a_ : Optional[int] = make_list_of_images(SCREAMING_SNAKE_CASE__ ) if not valid_images(SCREAMING_SNAKE_CASE__ ): raise ValueError( 'Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. a_ : Any = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for image in images] if do_resize: a_ : str = [self.resize(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ ) for image in images] if do_center_crop: a_ : int = [self.center_crop(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ ) for image in images] if do_rescale: a_ : Optional[Any] = [self.rescale(image=SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ ) for image in images] if do_normalize: a_ : List[Any] = [self.normalize(image=SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ ) for image in images] a_ : Dict = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images] a_ : Tuple = {'pixel_values': images} return BatchFeature(data=SCREAMING_SNAKE_CASE__ , tensor_type=SCREAMING_SNAKE_CASE__ )
32
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) UpperCAmelCase_ : Tuple = { 'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : List[Any] = [ 'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST', 'MegaForCausalLM', 'MegaForMaskedLM', 'MegaForMultipleChoice', 'MegaForQuestionAnswering', 'MegaForSequenceClassification', 'MegaForTokenClassification', 'MegaModel', 'MegaPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mega import ( MEGA_PRETRAINED_MODEL_ARCHIVE_LIST, MegaForCausalLM, MegaForMaskedLM, MegaForMultipleChoice, MegaForQuestionAnswering, MegaForSequenceClassification, MegaForTokenClassification, MegaModel, MegaPreTrainedModel, ) else: import sys UpperCAmelCase_ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
32
def SCREAMING_SNAKE_CASE_ ( __A : list[int] , __A : str ) -> list[int]: """simple docstring""" a_ : Any = int(__A ) # Initialize Result a_ : Tuple = [] # Traverse through all denomination for denomination in reversed(__A ): # Find denominations while int(__A ) >= int(__A ): total_value -= int(__A ) answer.append(__A ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": UpperCAmelCase_ : Union[str, Any] = [] UpperCAmelCase_ : Union[str, Any] = '0' if ( input('Do you want to enter your denominations ? (yY/n): ').strip().lower() == "y" ): UpperCAmelCase_ : List[Any] = int(input('Enter the number of denominations you want to add: ').strip()) for i in range(0, n): denominations.append(int(input(F'Denomination {i}: ').strip())) UpperCAmelCase_ : str = input('Enter the change you want to make in Indian Currency: ').strip() else: # All denominations of Indian Currency if user does not enter UpperCAmelCase_ : List[Any] = [1, 2, 5, 10, 20, 50, 100, 500, 2000] UpperCAmelCase_ : str = input('Enter the change you want to make: ').strip() if int(value) == 0 or int(value) < 0: print('The total value cannot be zero or negative.') else: print(F'Following is minimal change for {value}: ') UpperCAmelCase_ : Optional[Any] = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=' ')
32
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available UpperCAmelCase_ : Any = { 'configuration_audio_spectrogram_transformer': [ 'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ASTConfig', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : int = [ 'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'ASTForAudioClassification', 'ASTModel', 'ASTPreTrainedModel', ] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Union[str, Any] = ['ASTFeatureExtractor'] if TYPE_CHECKING: from .configuration_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ASTConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ASTForAudioClassification, ASTModel, ASTPreTrainedModel, ) try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor else: import sys UpperCAmelCase_ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
32
import flax.linen as nn import jax import jax.numpy as jnp class SCREAMING_SNAKE_CASE__ ( nn.Module ): snake_case__ : int snake_case__ : jnp.dtype = jnp.floataa def SCREAMING_SNAKE_CASE ( self : str ) -> int: a_ : Dict = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[int]: a_ , a_ , a_ , a_ : Union[str, Any] = hidden_states.shape a_ : List[str] = jax.image.resize( SCREAMING_SNAKE_CASE__ , shape=(batch, height * 2, width * 2, channels) , method='nearest' , ) a_ : Any = self.conv(SCREAMING_SNAKE_CASE__ ) return hidden_states class SCREAMING_SNAKE_CASE__ ( nn.Module ): snake_case__ : int snake_case__ : jnp.dtype = jnp.floataa def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: a_ : Optional[int] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self : Any , SCREAMING_SNAKE_CASE__ : int ) -> Optional[int]: # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim # hidden_states = jnp.pad(hidden_states, pad_width=pad) a_ : str = self.conv(SCREAMING_SNAKE_CASE__ ) return hidden_states class SCREAMING_SNAKE_CASE__ ( nn.Module ): snake_case__ : int snake_case__ : int = None snake_case__ : float = 0.0 snake_case__ : bool = None snake_case__ : jnp.dtype = jnp.floataa def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: a_ : List[str] = self.in_channels if self.out_channels is None else self.out_channels a_ : Optional[int] = nn.GroupNorm(num_groups=3_2 , epsilon=1E-5 ) a_ : Any = nn.Conv( SCREAMING_SNAKE_CASE__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) a_ : Optional[int] = nn.Dense(SCREAMING_SNAKE_CASE__ , dtype=self.dtype ) a_ : Union[str, Any] = nn.GroupNorm(num_groups=3_2 , epsilon=1E-5 ) a_ : int = nn.Dropout(self.dropout_prob ) a_ : Optional[Any] = nn.Conv( SCREAMING_SNAKE_CASE__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) a_ : List[str] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut a_ : List[Any] = None if use_nin_shortcut: a_ : Union[str, Any] = nn.Conv( SCREAMING_SNAKE_CASE__ , kernel_size=(1, 1) , strides=(1, 1) , padding='VALID' , dtype=self.dtype , ) def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any]=True ) -> int: a_ : List[Any] = hidden_states a_ : Any = self.norma(SCREAMING_SNAKE_CASE__ ) a_ : Any = nn.swish(SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = self.conva(SCREAMING_SNAKE_CASE__ ) a_ : int = self.time_emb_proj(nn.swish(SCREAMING_SNAKE_CASE__ ) ) a_ : List[str] = jnp.expand_dims(jnp.expand_dims(SCREAMING_SNAKE_CASE__ , 1 ) , 1 ) a_ : Optional[int] = hidden_states + temb a_ : List[str] = self.norma(SCREAMING_SNAKE_CASE__ ) a_ : Tuple = nn.swish(SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = self.dropout(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = self.conva(SCREAMING_SNAKE_CASE__ ) if self.conv_shortcut is not None: a_ : List[str] = self.conv_shortcut(SCREAMING_SNAKE_CASE__ ) return hidden_states + residual
32
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase_ : List[Any] = logging.get_logger(__name__) UpperCAmelCase_ : List[str] = { 'facebook/xlm-roberta-xl': 'https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json', 'facebook/xlm-roberta-xxl': 'https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json', # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl } class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Any = '''xlm-roberta-xl''' def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Tuple=2_5_0_8_8_0 , SCREAMING_SNAKE_CASE__ : List[str]=2_5_6_0 , SCREAMING_SNAKE_CASE__ : Tuple=3_6 , SCREAMING_SNAKE_CASE__ : Tuple=3_2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1_0_2_4_0 , SCREAMING_SNAKE_CASE__ : int="gelu" , SCREAMING_SNAKE_CASE__ : List[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=5_1_4 , SCREAMING_SNAKE_CASE__ : int=1 , SCREAMING_SNAKE_CASE__ : List[Any]=0.02 , SCREAMING_SNAKE_CASE__ : str=1E-05 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0 , SCREAMING_SNAKE_CASE__ : int=2 , SCREAMING_SNAKE_CASE__ : Any="absolute" , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Optional[int]=None , **SCREAMING_SNAKE_CASE__ : Optional[int] , ) -> Optional[int]: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) a_ : Any = vocab_size a_ : str = hidden_size a_ : Optional[Any] = num_hidden_layers a_ : Dict = num_attention_heads a_ : Tuple = hidden_act a_ : Optional[Any] = intermediate_size a_ : Dict = hidden_dropout_prob a_ : str = attention_probs_dropout_prob a_ : Union[str, Any] = max_position_embeddings a_ : Any = type_vocab_size a_ : int = initializer_range a_ : List[str] = layer_norm_eps a_ : str = position_embedding_type a_ : int = use_cache a_ : Tuple = classifier_dropout class SCREAMING_SNAKE_CASE__ ( lowercase__ ): @property def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": a_ : Dict = {0: 'batch', 1: 'choice', 2: 'sequence'} else: a_ : List[Any] = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
32
import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. UpperCAmelCase_ : Dict = {'LayoutLMv2Config', 'LayoutLMv3Config'} @is_pipeline_test class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): snake_case__ : List[str] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING snake_case__ : Optional[Any] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: snake_case__ : str = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: snake_case__ : List[Any] = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: a_ : List[Any] = pipeline( task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' ) a_ : int = text_classifier('This is great !' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}] ) a_ : Tuple = text_classifier('This is great !' , top_k=2 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}] ) a_ : List[str] = text_classifier(['This is great !', 'This is bad'] , top_k=2 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [ [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}], [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}], ] , ) a_ : Tuple = text_classifier('This is great !' , top_k=1 ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}] ) # Legacy behavior a_ : Union[str, Any] = text_classifier('This is great !' , return_all_scores=SCREAMING_SNAKE_CASE__ ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}] ) a_ : List[str] = text_classifier('This is great !' , return_all_scores=SCREAMING_SNAKE_CASE__ ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}]] ) a_ : int = text_classifier(['This is great !', 'Something else'] , return_all_scores=SCREAMING_SNAKE_CASE__ ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [ [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}], [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}], ] , ) a_ : str = text_classifier(['This is great !', 'Something else'] , return_all_scores=SCREAMING_SNAKE_CASE__ ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [ {'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_0', 'score': 0.504}, ] , ) @require_torch def SCREAMING_SNAKE_CASE ( self : int ) -> Dict: import torch a_ : List[Any] = pipeline( task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' , device=torch.device('cpu' ) , ) a_ : Any = text_classifier('This is great !' 
) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}] ) @require_tf def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: a_ : List[str] = pipeline( task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='tf' ) a_ : Optional[int] = text_classifier('This is great !' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'LABEL_0', 'score': 0.504}] ) @slow @require_torch def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: a_ : List[str] = pipeline('text-classification' ) a_ : Dict = text_classifier('This is great !' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'POSITIVE', 'score': 1.0}] ) a_ : Union[str, Any] = text_classifier('This is bad !' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'NEGATIVE', 'score': 1.0}] ) a_ : Tuple = text_classifier('Birds are a type of animal' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'POSITIVE', 'score': 0.988}] ) @slow @require_tf def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]: a_ : Dict = pipeline('text-classification' , framework='tf' ) a_ : Optional[Any] = text_classifier('This is great !' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'POSITIVE', 'score': 1.0}] ) a_ : int = text_classifier('This is bad !' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'NEGATIVE', 'score': 1.0}] ) a_ : Optional[int] = text_classifier('Birds are a type of animal' ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': 'POSITIVE', 'score': 0.988}] ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any: a_ : Optional[Any] = TextClassificationPipeline(model=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ ) return text_classifier, ["HuggingFace is in", "This is another test"] def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Union[str, Any]: a_ : List[str] = text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 a_ : Union[str, Any] = 'HuggingFace is in' a_ : int = text_classifier(SCREAMING_SNAKE_CASE__ ) self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}] ) self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() ) a_ : Union[str, Any] = ['HuggingFace is in ', 'Paris is in France'] a_ : int = text_classifier(SCREAMING_SNAKE_CASE__ ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}, {'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}] , ) self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() ) self.assertTrue(outputs[1]['label'] in model.config.idalabel.values() ) # Forcing to get all results with `top_k=None` # This is NOT the legacy format a_ : List[Any] = text_classifier(SCREAMING_SNAKE_CASE__ , top_k=SCREAMING_SNAKE_CASE__ ) a_ : Dict = len(model.config.idalabel.values() ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [[{'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}] * N, [{'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}] * N] , ) a_ : int = {'text': 
'HuggingFace is in ', 'text_pair': 'Paris is in France'} a_ : Optional[int] = text_classifier(SCREAMING_SNAKE_CASE__ ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , {'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )} , ) self.assertTrue(outputs['label'] in model.config.idalabel.values() ) # This might be used a text pair, but tokenizer + pipe interaction # makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this usage instead as it was outputting wrong outputs. a_ : Any = [['HuggingFace is in ', 'Paris is in France']] with self.assertRaises(SCREAMING_SNAKE_CASE__ ): text_classifier(SCREAMING_SNAKE_CASE__ ) # This used to be valid for doing text pairs # We're keeping it working because of backward compatibility a_ : Tuple = text_classifier([[['HuggingFace is in ', 'Paris is in France']]] ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'label': ANY(SCREAMING_SNAKE_CASE__ ), 'score': ANY(SCREAMING_SNAKE_CASE__ )}] , ) self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
32
1
from __future__ import annotations import math import random from typing import Any class SCREAMING_SNAKE_CASE__ : def __init__( self : int ) -> None: a_ : list[Any] = [] a_ : int = 0 a_ : int = 0 def SCREAMING_SNAKE_CASE ( self : Any ) -> bool: return self.head == self.tail def SCREAMING_SNAKE_CASE ( self : Any , SCREAMING_SNAKE_CASE__ : Any ) -> None: self.data.append(SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = self.tail + 1 def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any: a_ : Any = self.data[self.head] a_ : Optional[int] = self.head + 1 return ret def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: return self.tail - self.head def SCREAMING_SNAKE_CASE ( self : Tuple ) -> None: print(self.data ) print('**************' ) print(self.data[self.head : self.tail] ) class SCREAMING_SNAKE_CASE__ : def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Any ) -> None: a_ : int = data a_ : MyNode | None = None a_ : MyNode | None = None a_ : int = 1 def SCREAMING_SNAKE_CASE ( self : int ) -> Any: return self.data def SCREAMING_SNAKE_CASE ( self : int ) -> MyNode | None: return self.left def SCREAMING_SNAKE_CASE ( self : Any ) -> MyNode | None: return self.right def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: return self.height def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Any ) -> None: a_ : Optional[Any] = data def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : MyNode | None ) -> None: a_ : Optional[int] = node def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : MyNode | None ) -> None: a_ : Any = node def SCREAMING_SNAKE_CASE ( self : Any , SCREAMING_SNAKE_CASE__ : int ) -> None: a_ : Optional[Any] = height def SCREAMING_SNAKE_CASE_ ( __A : MyNode | None ) -> int: """simple docstring""" if node is None: return 0 return node.get_height() def SCREAMING_SNAKE_CASE_ ( __A : int , __A : int ) -> int: """simple docstring""" if a > b: return a return b def SCREAMING_SNAKE_CASE_ ( __A : MyNode ) -> MyNode: """simple docstring""" print('left rotation node:' , node.get_data() ) a_ : Optional[int] = node.get_left() assert ret is not None node.set_left(ret.get_right() ) ret.set_right(__A ) a_ : Union[str, Any] = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(__A ) a_ : Union[str, Any] = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1 ret.set_height(__A ) return ret def SCREAMING_SNAKE_CASE_ ( __A : MyNode ) -> MyNode: """simple docstring""" print('right rotation node:' , node.get_data() ) a_ : Dict = node.get_right() assert ret is not None node.set_right(ret.get_left() ) ret.set_left(__A ) a_ : int = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(__A ) a_ : int = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1 ret.set_height(__A ) return ret def SCREAMING_SNAKE_CASE_ ( __A : MyNode ) -> MyNode: """simple docstring""" a_ : Optional[Any] = node.get_left() assert left_child is not None node.set_left(left_rotation(__A ) ) return right_rotation(__A ) def SCREAMING_SNAKE_CASE_ ( __A : MyNode ) -> MyNode: """simple docstring""" a_ : int = node.get_right() assert right_child is not None node.set_right(right_rotation(__A ) ) return left_rotation(__A ) def SCREAMING_SNAKE_CASE_ ( __A : MyNode | None , __A : Any ) -> MyNode | None: """simple docstring""" if node is None: return MyNode(__A ) if data < node.get_data(): node.set_left(insert_node(node.get_left() , __A ) ) if ( 
get_height(node.get_left() ) - get_height(node.get_right() ) == 2 ): # an unbalance detected a_ : Any = node.get_left() assert left_child is not None if ( data < left_child.get_data() ): # new node is the left child of the left child a_ : List[Any] = right_rotation(__A ) else: a_ : Tuple = lr_rotation(__A ) else: node.set_right(insert_node(node.get_right() , __A ) ) if get_height(node.get_right() ) - get_height(node.get_left() ) == 2: a_ : Optional[Any] = node.get_right() assert right_child is not None if data < right_child.get_data(): a_ : int = rl_rotation(__A ) else: a_ : List[Any] = left_rotation(__A ) a_ : Dict = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(__A ) return node def SCREAMING_SNAKE_CASE_ ( __A : MyNode ) -> Any: """simple docstring""" while True: a_ : List[Any] = root.get_right() if right_child is None: break a_ : List[str] = right_child return root.get_data() def SCREAMING_SNAKE_CASE_ ( __A : MyNode ) -> Any: """simple docstring""" while True: a_ : Tuple = root.get_left() if left_child is None: break a_ : Optional[Any] = left_child return root.get_data() def SCREAMING_SNAKE_CASE_ ( __A : MyNode , __A : Any ) -> MyNode | None: """simple docstring""" a_ : List[str] = root.get_left() a_ : List[Any] = root.get_right() if root.get_data() == data: if left_child is not None and right_child is not None: a_ : List[Any] = get_left_most(__A ) root.set_data(__A ) root.set_right(del_node(__A , __A ) ) elif left_child is not None: a_ : Optional[Any] = left_child elif right_child is not None: a_ : Union[str, Any] = right_child else: return None elif root.get_data() > data: if left_child is None: print('No such data' ) return root else: root.set_left(del_node(__A , __A ) ) else: # root.get_data() < data if right_child is None: return root else: root.set_right(del_node(__A , __A ) ) if get_height(__A ) - get_height(__A ) == 2: assert right_child is not None if get_height(right_child.get_right() ) > get_height(right_child.get_left() ): a_ : List[str] = left_rotation(__A ) else: a_ : List[Any] = rl_rotation(__A ) elif get_height(__A ) - get_height(__A ) == -2: assert left_child is not None if get_height(left_child.get_left() ) > get_height(left_child.get_right() ): a_ : str = right_rotation(__A ) else: a_ : int = lr_rotation(__A ) a_ : List[str] = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1 root.set_height(__A ) return root class SCREAMING_SNAKE_CASE__ : def __init__( self : List[str] ) -> None: a_ : MyNode | None = None def SCREAMING_SNAKE_CASE ( self : str ) -> int: return get_height(self.root ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any ) -> None: print('insert:' + str(SCREAMING_SNAKE_CASE__ ) ) a_ : Dict = insert_node(self.root , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : Any ) -> None: print('delete:' + str(SCREAMING_SNAKE_CASE__ ) ) if self.root is None: print('Tree is empty!' 
) return a_ : Tuple = del_node(self.root , SCREAMING_SNAKE_CASE__ ) def __str__( self : List[str] , ) -> str: # a level traversale, gives a more intuitive look on the tree a_ : str = '' a_ : List[str] = MyQueue() q.push(self.root ) a_ : Optional[int] = self.get_height() if layer == 0: return output a_ : List[str] = 0 while not q.is_empty(): a_ : Any = q.pop() a_ : Optional[int] = ' ' * int(math.pow(2 , layer - 1 ) ) output += space if node is None: output += "*" q.push(SCREAMING_SNAKE_CASE__ ) q.push(SCREAMING_SNAKE_CASE__ ) else: output += str(node.get_data() ) q.push(node.get_left() ) q.push(node.get_right() ) output += space a_ : int = cnt + 1 for i in range(1_0_0 ): if cnt == math.pow(2 , SCREAMING_SNAKE_CASE__ ) - 1: a_ : Dict = layer - 1 if layer == 0: output += "\n*************************************" return output output += "\n" break output += "\n*************************************" return output def SCREAMING_SNAKE_CASE_ ( ) -> None: """simple docstring""" import doctest doctest.testmod() if __name__ == "__main__": _test() UpperCAmelCase_ : Dict = AVLtree() UpperCAmelCase_ : Union[str, Any] = list(range(10)) random.shuffle(lst) for i in lst: t.insert(i) print(str(t)) random.shuffle(lst) for i in lst: t.del_node(i) print(str(t))
32
import jax.numpy as jnp from ...utils import logging from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel from .configuration_mta import MTaConfig UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__) UpperCAmelCase_ : str = 'T5Config' def SCREAMING_SNAKE_CASE_ ( __A : jnp.array , __A : int , __A : int ) -> jnp.ndarray: """simple docstring""" a_ : Dict = jnp.zeros_like(__A ) a_ : Dict = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] ) a_ : str = shifted_input_ids.at[:, 0].set(__A ) a_ : int = jnp.where(shifted_input_ids == -1_00 , __A , __A ) return shifted_input_ids class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : str = '''mt5''' snake_case__ : List[Any] = MTaConfig class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : str = '''mt5''' snake_case__ : List[str] = MTaConfig class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Any = '''mt5''' snake_case__ : Union[str, Any] = MTaConfig
32
1
import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) UpperCAmelCase_ : List[Any] = { 'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in', 'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0', 'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out', 'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1', 'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm', 'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2', 'mask_downscaling.0': 'mask_embed.conv1', 'mask_downscaling.1': 'mask_embed.layer_norm1', 'mask_downscaling.3': 'mask_embed.conv2', 'mask_downscaling.4': 'mask_embed.layer_norm2', 'mask_downscaling.6': 'mask_embed.conv3', 'point_embeddings': 'point_embed', 'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding', 'image_encoder': 'vision_encoder', 'neck.0': 'neck.conv1', 'neck.1': 'neck.layer_norm1', 'neck.2': 'neck.conv2', 'neck.3': 'neck.layer_norm2', 'patch_embed.proj': 'patch_embed.projection', '.norm': '.layer_norm', 'blocks': 'layers', } def SCREAMING_SNAKE_CASE_ ( __A : List[str] ) -> Any: """simple docstring""" a_ : Optional[int] = {} state_dict.pop('pixel_mean' , __A ) state_dict.pop('pixel_std' , __A ) a_ : Dict = R'.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*' for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: a_ : Tuple = key.replace(__A , __A ) if re.match(__A , __A ): a_ : int = int(re.match(__A , __A ).group(2 ) ) if layer_nb == 0: a_ : Dict = key.replace('layers.0' , 'proj_in' ) elif layer_nb == 1: a_ : List[str] = key.replace('layers.1' , 'layers.0' ) elif layer_nb == 2: a_ : int = key.replace('layers.2' , 'proj_out' ) a_ : List[Any] = value a_ : Optional[int] = model_state_dict[ 'prompt_encoder.shared_embedding.positional_embedding' ] return model_state_dict def SCREAMING_SNAKE_CASE_ ( __A : Any , __A : int , __A : List[Any] , __A : Tuple="ybelkada/segment-anything" ) -> str: """simple docstring""" a_ : Tuple = hf_hub_download(__A , F"""checkpoints/{model_name}.pth""" ) if "sam_vit_b" in model_name: a_ : Optional[int] = SamConfig() elif "sam_vit_l" in model_name: a_ : Union[str, Any] = SamVisionConfig( hidden_size=10_24 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , ) a_ : Optional[int] = SamConfig( vision_config=__A , ) elif "sam_vit_h" in model_name: a_ : List[str] = SamVisionConfig( hidden_size=12_80 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , ) a_ : int = SamConfig( vision_config=__A , ) a_ : str = torch.load(__A , map_location='cpu' ) a_ : Tuple = replace_keys(__A ) a_ : Optional[Any] = SamImageProcessor() a_ : Any = SamProcessor(image_processor=__A ) a_ : List[Any] = SamModel(__A ) hf_model.load_state_dict(__A ) a_ : Dict = hf_model.to('cuda' ) a_ : Optional[Any] = 'https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png' a_ : List[Any] = Image.open(requests.get(__A , stream=__A ).raw ).convert('RGB' ) a_ : Dict = [[[4_00, 6_50]]] a_ : str = [[1]] a_ : Optional[Any] = processor(images=np.array(__A ) , return_tensors='pt' ).to('cuda' ) with torch.no_grad(): a_ : str = hf_model(**__A ) a_ : List[str] = output.iou_scores.squeeze() if model_name == "sam_vit_h_4b8939": assert scores[-1].item() == 
0.579890251159668 a_ : List[str] = processor( images=np.array(__A ) , input_points=__A , input_labels=__A , return_tensors='pt' ).to('cuda' ) with torch.no_grad(): a_ : Optional[Any] = hf_model(**__A ) a_ : str = output.iou_scores.squeeze() assert scores[-1].item() == 0.9712603092193604 a_ : Any = ((75, 2_75, 17_25, 8_50),) a_ : Optional[int] = processor(images=np.array(__A ) , input_boxes=__A , return_tensors='pt' ).to('cuda' ) with torch.no_grad(): a_ : str = hf_model(**__A ) a_ : Dict = output.iou_scores.squeeze() assert scores[-1].item() == 0.8686015605926514 # Test with 2 points and 1 image. a_ : Union[str, Any] = [[[4_00, 6_50], [8_00, 6_50]]] a_ : Optional[int] = [[1, 1]] a_ : Any = processor( images=np.array(__A ) , input_points=__A , input_labels=__A , return_tensors='pt' ).to('cuda' ) with torch.no_grad(): a_ : Optional[int] = hf_model(**__A ) a_ : List[str] = output.iou_scores.squeeze() assert scores[-1].item() == 0.9936047792434692 if __name__ == "__main__": UpperCAmelCase_ : Tuple = argparse.ArgumentParser() UpperCAmelCase_ : Optional[int] = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195'] parser.add_argument( '--model_name', default='sam_vit_h_4b8939', choices=choices, type=str, help='Path to hf config.json of model to convert', ) parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model and processor to the hub after converting', ) parser.add_argument( '--model_hub_id', default='ybelkada/segment-anything', choices=choices, type=str, help='Path to hf config.json of model to convert', ) UpperCAmelCase_ : Union[str, Any] = parser.parse_args() convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
32
from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent UpperCAmelCase_ : Any = {'UserAgent': UserAgent().random} def SCREAMING_SNAKE_CASE_ ( __A : Optional[int] ) -> dict: """simple docstring""" a_ : Tuple = script.contents[0] a_ : int = json.loads(data[data.find('{"config"' ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class SCREAMING_SNAKE_CASE__ : def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[Any]: a_ : Tuple = F"""https://www.instagram.com/{username}/""" a_ : Optional[Any] = self.get_json() def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> dict: a_ : Any = requests.get(self.url , headers=SCREAMING_SNAKE_CASE__ ).text a_ : Dict = BeautifulSoup(SCREAMING_SNAKE_CASE__ , 'html.parser' ).find_all('script' ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self : Union[str, Any] ) -> str: return F"""{self.__class__.__name__}('{self.username}')""" def __str__( self : Optional[int] ) -> str: return F"""{self.fullname} ({self.username}) is {self.biography}""" @property def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str: return self.user_data["username"] @property def SCREAMING_SNAKE_CASE ( self : str ) -> str: return self.user_data["full_name"] @property def SCREAMING_SNAKE_CASE ( self : Any ) -> str: return self.user_data["biography"] @property def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: return self.user_data["business_email"] @property def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: return self.user_data["external_url"] @property def SCREAMING_SNAKE_CASE ( self : Dict ) -> int: return self.user_data["edge_followed_by"]["count"] @property def SCREAMING_SNAKE_CASE ( self : Any ) -> int: return self.user_data["edge_follow"]["count"] @property def SCREAMING_SNAKE_CASE ( self : str ) -> int: return self.user_data["edge_owner_to_timeline_media"]["count"] @property def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: return self.user_data["profile_pic_url_hd"] @property def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> bool: return self.user_data["is_verified"] @property def SCREAMING_SNAKE_CASE ( self : Any ) -> bool: return self.user_data["is_private"] def SCREAMING_SNAKE_CASE_ ( __A : str = "github" ) -> None: """simple docstring""" import os if os.environ.get('CI' ): return # test failing on GitHub Actions a_ : int = InstagramUser(__A ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , __A ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." assert instagram_user.number_of_posts > 1_50 assert instagram_user.number_of_followers > 12_00_00 assert instagram_user.number_of_followings > 15 assert instagram_user.email == "support@github.com" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith('https://instagram.' 
) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase_ : Union[str, Any] = InstagramUser('github') print(instagram_user) print(F'{instagram_user.number_of_posts = }') print(F'{instagram_user.number_of_followers = }') print(F'{instagram_user.number_of_followings = }') print(F'{instagram_user.email = }') print(F'{instagram_user.website = }') print(F'{instagram_user.profile_picture_url = }') print(F'{instagram_user.is_verified = }') print(F'{instagram_user.is_private = }')
32
1
import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class SCREAMING_SNAKE_CASE__ ( lowercase__ , unittest.TestCase ): snake_case__ : Optional[int] = RobertaTokenizer snake_case__ : Tuple = RobertaTokenizerFast snake_case__ : Dict = True snake_case__ : Union[str, Any] = {'''cls_token''': '''<s>'''} def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt a_ : Dict = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] a_ : Any = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) a_ : Dict = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] a_ : Optional[int] = {'unk_token': '<unk>'} a_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) a_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(SCREAMING_SNAKE_CASE__ ) ) def SCREAMING_SNAKE_CASE ( self : str , **SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[Any]: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : Tuple ) -> Optional[int]: kwargs.update(self.special_tokens_map ) return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any: a_ : str = 'lower newer' a_ : Optional[int] = 'lower newer' return input_text, output_text def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: a_ : Tuple = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) a_ : List[Any] = 'lower newer' a_ : List[Any] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'] a_ : Union[str, Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) # , add_prefix_space=True) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) a_ : Dict = tokens + [tokenizer.unk_token] a_ : int = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: a_ : Tuple = self.get_tokenizer() self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=SCREAMING_SNAKE_CASE__ ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] ) self.assertListEqual( tokenizer.encode('Hello world! 
cécé herlolip 418' , add_special_tokens=SCREAMING_SNAKE_CASE__ ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , ) @slow def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: a_ : str = self.tokenizer_class.from_pretrained('roberta-base' ) a_ : Optional[int] = tokenizer.encode('sequence builders' , add_special_tokens=SCREAMING_SNAKE_CASE__ ) a_ : str = tokenizer.encode('multi-sequence build' , add_special_tokens=SCREAMING_SNAKE_CASE__ ) a_ : Any = tokenizer.encode( 'sequence builders' , add_special_tokens=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ ) a_ : List[str] = tokenizer.encode( 'sequence builders' , 'multi-sequence build' , add_special_tokens=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ ) a_ : Tuple = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ ) a_ : int = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def SCREAMING_SNAKE_CASE ( self : Any ) -> str: a_ : List[Any] = self.get_tokenizer() a_ : List[Any] = 'Encode this sequence.' a_ : Tuple = tokenizer.byte_encoder[' '.encode('utf-8' )[0]] # Testing encoder arguments a_ : str = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) a_ : Optional[Any] = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ ) a_ : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) tokenizer.add_special_tokens({'bos_token': '<s>'} ) a_ : str = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) a_ : Dict = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Testing spaces after special tokens a_ : Dict = '<mask>' tokenizer.add_special_tokens( {'mask_token': AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ )} ) # mask token has a left space a_ : Tuple = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) a_ : Tuple = 'Encode <mask> sequence' a_ : Tuple = 'Encode <mask>sequence' a_ : Dict = tokenizer.encode(SCREAMING_SNAKE_CASE__ ) a_ : Tuple = encoded.index(SCREAMING_SNAKE_CASE__ ) a_ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) a_ : str = tokenizer.encode(SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = encoded.index(SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : int ) -> int: pass def SCREAMING_SNAKE_CASE ( self : Dict ) -> int: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): a_ : str = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) a_ : Optional[Any] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) a_ : Any = 'A, <mask> AllenNLP sentence.' 
a_ : Dict = tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = tokenizer_p.encode_plus(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , ) a_ : Any = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) a_ : int = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] ) self.assertSequenceEqual( SCREAMING_SNAKE_CASE__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( SCREAMING_SNAKE_CASE__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): a_ : int = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) a_ : Dict = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['add_prefix_space'] , SCREAMING_SNAKE_CASE__ ) self.assertEqual(post_processor_state['add_prefix_space'] , SCREAMING_SNAKE_CASE__ ) self.assertEqual(post_processor_state['trim_offsets'] , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : str ) -> str: # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): a_ : int = 'hello' # `hello` is a token in the vocabulary of `pretrained_name` a_ : List[Any] = F"""{text_of_1_token} {text_of_1_token}""" a_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = tokenizer_r(SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE__ ) + 1, len(SCREAMING_SNAKE_CASE__ ) + 1 + len(SCREAMING_SNAKE_CASE__ )) , ) a_ : int = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ ) a_ : Dict = tokenizer_r(SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) self.assertEqual(encoding.offset_mapping[0] , (0, 
len(SCREAMING_SNAKE_CASE__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE__ ) + 1, len(SCREAMING_SNAKE_CASE__ ) + 1 + len(SCREAMING_SNAKE_CASE__ )) , ) a_ : Dict = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = tokenizer_r(SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE__ ), len(SCREAMING_SNAKE_CASE__ ) + 1 + len(SCREAMING_SNAKE_CASE__ )) , ) a_ : Any = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ ) a_ : Any = tokenizer_r(SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE__ ), len(SCREAMING_SNAKE_CASE__ ) + 1 + len(SCREAMING_SNAKE_CASE__ )) , ) a_ : Tuple = F""" {text}""" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) a_ : List[Any] = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ ) a_ : Optional[Any] = tokenizer_r(SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(SCREAMING_SNAKE_CASE__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE__ ) + 1, 1 + len(SCREAMING_SNAKE_CASE__ ) + 1 + len(SCREAMING_SNAKE_CASE__ )) , ) a_ : Optional[int] = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ ) a_ : Any = tokenizer_r(SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(SCREAMING_SNAKE_CASE__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE__ ), 1 + len(SCREAMING_SNAKE_CASE__ ) + 1 + len(SCREAMING_SNAKE_CASE__ )) , ) a_ : Dict = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = tokenizer_r(SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(SCREAMING_SNAKE_CASE__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE__ ), 1 + len(SCREAMING_SNAKE_CASE__ ) + 1 + len(SCREAMING_SNAKE_CASE__ )) , )
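# Illustrative sketch (not part of the test class above): how the
# `add_prefix_space` / `trim_offsets` flags change the offsets reported by a
# Roberta-style fast tokenizer. The checkpoint name is an assumption; only the
# public `transformers` API is relied upon, and the printed offsets are meant
# for inspection rather than asserted values.
from itertools import product

from transformers import RobertaTokenizerFast

text = "hello hello"
for add_prefix_space, trim_offsets in product([True, False], repeat=2):
    tok = RobertaTokenizerFast.from_pretrained(
        "roberta-base", add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
    )
    enc = tok(text, return_offsets_mapping=True, add_special_tokens=False)
    # With trim_offsets=False the leading space stays inside the second token's span.
    print(add_prefix_space, trim_offsets, enc["offset_mapping"])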
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Tuple = ['''image_processor''', '''tokenizer'''] snake_case__ : Union[str, Any] = '''CLIPImageProcessor''' snake_case__ : Dict = ('''CLIPTokenizer''', '''CLIPTokenizerFast''') def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : List[str]=None , **SCREAMING_SNAKE_CASE__ : int ) -> Any: a_ : List[Any] = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , SCREAMING_SNAKE_CASE__ , ) a_ : Tuple = kwargs.pop('feature_extractor' ) a_ : Tuple = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __call__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , **SCREAMING_SNAKE_CASE__ : str ) -> Optional[Any]: if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be none.' ) if text is not None: a_ : List[str] = self.tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) if images is not None: a_ : Dict = self.image_processor(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) if text is not None and images is not None: a_ : Dict = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE__ ) , tensor_type=SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Any , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[Any]: return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[Any]: return self.tokenizer.decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) @property def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: a_ : str = self.tokenizer.model_input_names a_ : Tuple = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def SCREAMING_SNAKE_CASE ( self : str ) -> str: warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , SCREAMING_SNAKE_CASE__ , ) return self.image_processor_class @property def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , SCREAMING_SNAKE_CASE__ , ) return self.image_processor
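# Hedged usage sketch for a CLIP-style processor: tokenize text and preprocess
# an image in a single call. It uses the public `transformers.CLIPProcessor`
# rather than the renamed class above; the checkpoint and image URL are
# illustrative assumptions.
import requests
from PIL import Image

from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.open(
    requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
)
inputs = processor(
    text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
)
# The combined encoding should expose both the tokenized text and the pixel values.
print(sorted(inputs.keys()))  # e.g. ['attention_mask', 'input_ids', 'pixel_values']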
import gzip import hashlib import json import multiprocessing import os import re import shutil import time from pathlib import Path import numpy as np from arguments import PreprocessingArguments from datasets import load_dataset from minhash_deduplication import deduplicate_dataset from transformers import AutoTokenizer, HfArgumentParser UpperCAmelCase_ : List[Any] = re.compile(R'\s+') def SCREAMING_SNAKE_CASE_ ( __A : Dict ) -> Optional[Any]: """simple docstring""" return {"hash": hashlib.mda(re.sub(__A , '' , example['content'] ).encode('utf-8' ) ).hexdigest()} def SCREAMING_SNAKE_CASE_ ( __A : List[str] ) -> Dict: """simple docstring""" a_ : List[Any] = [len(__A ) for line in example['content'].splitlines()] return {"line_mean": np.mean(__A ), "line_max": max(__A )} def SCREAMING_SNAKE_CASE_ ( __A : List[str] ) -> Optional[Any]: """simple docstring""" a_ : List[Any] = np.mean([c.isalnum() for c in example['content']] ) return {"alpha_frac": alpha_frac} def SCREAMING_SNAKE_CASE_ ( __A : Optional[int] , __A : Optional[int] ) -> Optional[int]: """simple docstring""" if example["hash"] in uniques: uniques.remove(example['hash'] ) return True else: return False def SCREAMING_SNAKE_CASE_ ( __A : Optional[Any] , __A : int=5 ) -> Any: """simple docstring""" a_ : Dict = ['auto-generated', 'autogenerated', 'automatically generated'] a_ : Optional[int] = example['content'].splitlines() for _, line in zip(range(__A ) , __A ): for keyword in keywords: if keyword in line.lower(): return {"autogenerated": True} else: return {"autogenerated": False} def SCREAMING_SNAKE_CASE_ ( __A : Union[str, Any] , __A : Tuple=5 , __A : List[str]=0.05 ) -> List[Any]: """simple docstring""" a_ : List[Any] = ['unit tests', 'test file', 'configuration file'] a_ : Union[str, Any] = example['content'].splitlines() a_ : Union[str, Any] = 0 a_ : Optional[Any] = 0 # first test for _, line in zip(range(__A ) , __A ): for keyword in keywords: if keyword in line.lower(): return {"config_or_test": True} # second test a_ : Optional[Any] = example['content'].count('\n' ) a_ : int = int(coeff * nlines ) for line in lines: count_config += line.lower().count('config' ) count_test += line.lower().count('test' ) if count_config > threshold or count_test > threshold: return {"config_or_test": True} return {"config_or_test": False} def SCREAMING_SNAKE_CASE_ ( __A : str ) -> List[str]: """simple docstring""" a_ : str = ['def ', 'class ', 'for ', 'while '] a_ : Optional[Any] = example['content'].splitlines() for line in lines: for keyword in keywords: if keyword in line.lower(): return {"has_no_keywords": False} return {"has_no_keywords": True} def SCREAMING_SNAKE_CASE_ ( __A : Any , __A : List[Any]=4 ) -> Optional[Any]: """simple docstring""" a_ : Any = example['content'].splitlines() a_ : List[Any] = 0 for line in lines: counter += line.lower().count('=' ) if counter > minimum: return {"has_few_assignments": False} return {"has_few_assignments": True} def SCREAMING_SNAKE_CASE_ ( __A : Optional[Any] ) -> List[str]: """simple docstring""" a_ : Optional[int] = tokenizer(example['content'] , truncation=__A )['input_ids'] a_ : Union[str, Any] = len(example['content'] ) / len(__A ) return {"ratio": ratio} def SCREAMING_SNAKE_CASE_ ( __A : Optional[int] ) -> Optional[Any]: """simple docstring""" a_ : List[Any] = {} results.update(get_hash(__A ) ) results.update(line_stats(__A ) ) results.update(alpha_stats(__A ) ) results.update(char_token_ratio(__A ) ) results.update(is_autogenerated(__A ) ) results.update(is_config_or_test(__A ) ) 
results.update(has_no_keywords(__A ) ) results.update(has_few_assignments(__A ) ) return results def SCREAMING_SNAKE_CASE_ ( __A : str , __A : str , __A : Union[str, Any] ) -> Optional[Any]: """simple docstring""" if not check_uniques(__A , __A ): return False elif example["autogenerated"]: return False elif example["line_max"] > args.line_max: return False elif example["line_mean"] > args.line_mean: return False elif example["alpha_frac"] < args.alpha_frac: return False elif example["ratio"] < args.min_token_ratio: return False elif example["config_or_test"] and np.random.rand() <= args.filter_proba: return False elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba: return False elif example["has_few_assignments"]: return False else: return True def SCREAMING_SNAKE_CASE_ ( __A : List[str] ) -> str: """simple docstring""" with open(__A , 'rb' ) as f_in: with gzip.open(str(__A ) + '.gz' , 'wb' , compresslevel=6 ) as f_out: shutil.copyfileobj(__A , __A ) os.unlink(__A ) # Settings UpperCAmelCase_ : Union[str, Any] = HfArgumentParser(PreprocessingArguments) UpperCAmelCase_ : Optional[Any] = parser.parse_args() if args.num_workers is None: UpperCAmelCase_ : Tuple = multiprocessing.cpu_count() UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(args.tokenizer_dir) # Load dataset UpperCAmelCase_ : List[str] = time.time() UpperCAmelCase_ : Union[str, Any] = load_dataset(args.dataset_name, split='train') print(F'Time to load dataset: {time.time()-t_start:.2f}') # Run preprocessing UpperCAmelCase_ : List[Any] = time.time() UpperCAmelCase_ : Optional[int] = ds.map(preprocess, num_proc=args.num_workers) print(F'Time to preprocess dataset: {time.time()-t_start:.2f}') # Deduplicate hashes UpperCAmelCase_ : int = set(ds.unique('hash')) UpperCAmelCase_ : List[str] = len(uniques) / len(ds) print(F'Fraction of duplicates: {1-frac:.2%}') # Deduplicate data and apply heuristics UpperCAmelCase_ : Optional[Any] = time.time() UpperCAmelCase_ : Optional[int] = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args}) print(F'Time to filter dataset: {time.time()-t_start:.2f}') print(F'Size of filtered dataset: {len(ds_filter)}') # Deduplicate with minhash and jaccard similarity if args.near_deduplication: UpperCAmelCase_ : Optional[int] = time.time() UpperCAmelCase_ , UpperCAmelCase_ : int = deduplicate_dataset(ds_filter, args.jaccard_threshold) print(F'Time to deduplicate dataset: {time.time()-t_start:.2f}') print(F'Size of deduplicate dataset: {len(ds_filter)}') # Save data in batches of samples_per_file UpperCAmelCase_ : int = Path(args.output_dir) output_dir.mkdir(exist_ok=True) # save duplicate_clusters in the output_dir as artifacts # not sure it is the right place the save it if args.near_deduplication: with open(output_dir / 'duplicate_clusters.json', 'w') as f: json.dump(duplicate_clusters, f) UpperCAmelCase_ : Dict = output_dir / 'data' data_dir.mkdir(exist_ok=True) UpperCAmelCase_ : int = time.time() for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)): UpperCAmelCase_ : List[Any] = str(data_dir / F'file-{file_number+1:012}.json') UpperCAmelCase_ : List[str] = min(len(ds_filter), index + args.samples_per_file) ds_filter.select(list(range(index, end_index))).to_json(file_path) compress_file(file_path) print(F'Time to save dataset: {time.time()-t_start:.2f}')
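# Minimal, self-contained sketch of the exact-deduplication idea used above:
# hash the whitespace-stripped content and keep only the first occurrence of
# each hash. Note that the script above calls `hashlib.mda`, which does not
# exist; `hashlib.md5` is presumably what is meant and is used here. The helper
# names (`content_hash`, `dedup`) are illustrative, not part of the script.
import hashlib
import re

_WHITESPACE = re.compile(r"\s+")


def content_hash(content: str) -> str:
    # Same normalization as get_hash above: drop all whitespace before hashing.
    return hashlib.md5(_WHITESPACE.sub("", content).encode("utf-8")).hexdigest()


def dedup(contents: list[str]) -> list[str]:
    seen: set[str] = set()
    kept: list[str] = []
    for content in contents:
        digest = content_hash(content)
        if digest not in seen:
            seen.add(digest)
            kept.append(content)
    return kept


print(dedup(["a = 1", "a  =  1", "b = 2"]))  # ['a = 1', 'b = 2']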
from __future__ import annotations

solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if a queen can be placed at board[row][column]."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Place queens row by row, backtracking when no column in a row is safe."""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """Print the board with 'Q' for queens and '.' for empty squares."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
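# Quick sanity check (illustrative): the 4x4 board has exactly two solutions.
# `solution` is the module-level list that `solve` appends to, so clear it
# before re-running on a smaller board.
solution.clear()
small_board = [[0 for _ in range(4)] for _ in range(4)]
solve(small_board, 0)
print("4-queens solutions:", len(solution))  # expected: 2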
from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase_ : Dict = { 'configuration_autoformer': [ 'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AutoformerConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Tuple = [ 'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'AutoformerForPrediction', 'AutoformerModel', 'AutoformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_autoformer import ( AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_autoformer import ( AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, AutoformerForPrediction, AutoformerModel, AutoformerPreTrainedModel, ) else: import sys UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def SCREAMING_SNAKE_CASE_ ( ) -> Any: """simple docstring""" a_ : Optional[Any] = HfArgumentParser(__A ) a_ : Optional[int] = parser.parse_args_into_dataclasses()[0] a_ : List[Any] = TensorFlowBenchmark(args=__A ) try: a_ : List[str] = parser.parse_args_into_dataclasses()[0] except ValueError as e: a_ : Dict = 'Arg --no_{0} is no longer used, please use --no-{0} instead.' a_ : Dict = ' '.join(str(__A ).split(' ' )[:-1] ) a_ : int = '' a_ : int = eval(str(__A ).split(' ' )[-1] ) a_ : Any = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(__A ) if len(__A ) > 0: a_ : str = full_error_msg + begin_error_msg + str(__A ) raise ValueError(__A ) benchmark.run() if __name__ == "__main__": main()
import math


def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime, using 6k +/- 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """Return the side length of the square spiral at which the ratio of primes
    along both diagonals first falls below ``ratio``."""
    primes = 3
    j = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
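# Illustrative checks (hand-verified) for the helpers above. `solution` walks
# the square spiral ring by ring, counting primes on the corners of each new
# ring, and returns the first side length whose diagonal prime ratio drops
# below the threshold.
assert is_prime(13) and not is_prime(1) and not is_prime(21)
# With a 50% threshold the ratio first falls below 0.5 at side length 11;
# the corner primes encountered are 13, 17, 31, 37, 43, 73 and 101.
assert solution(0.5) == 11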
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class SCREAMING_SNAKE_CASE__ ( lowercase__ , unittest.TestCase ): snake_case__ : Optional[Any] = TextToVideoSDPipeline snake_case__ : Optional[int] = TEXT_TO_IMAGE_PARAMS snake_case__ : str = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. snake_case__ : Optional[Any] = frozenset( [ '''num_inference_steps''', '''generator''', '''latents''', '''return_dict''', '''callback''', '''callback_steps''', ] ) def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: torch.manual_seed(0 ) a_ : Optional[int] = UNetaDConditionModel( block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=3_2 , attention_head_dim=4 , ) a_ : int = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=SCREAMING_SNAKE_CASE__ , set_alpha_to_one=SCREAMING_SNAKE_CASE__ , ) torch.manual_seed(0 ) a_ : int = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) a_ : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , ) a_ : Dict = CLIPTextModel(SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) a_ : Union[str, Any] = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, } return components def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any]=0 ) -> List[str]: if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ): a_ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) else: a_ : Optional[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) a_ : int = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'pt', } return inputs def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: a_ : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator a_ : Dict = self.get_dummy_components() a_ : str = TextToVideoSDPipeline(**SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = sd_pipe.to(SCREAMING_SNAKE_CASE__ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Tuple = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) a_ : Dict = 'np' a_ : Dict = sd_pipe(**SCREAMING_SNAKE_CASE__ ).frames a_ : int = 
frames[0][-3:, -3:, -1] assert frames[0].shape == (6_4, 6_4, 3) a_ : Union[str, Any] = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: self._test_attention_slicing_forward_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE__ , expected_max_diff=3E-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def SCREAMING_SNAKE_CASE ( self : Any ) -> str: self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE__ , expected_max_diff=1E-2 ) @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: pass @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]: pass @unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' ) def SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]: pass def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: return super().test_progress_bar() @slow @skip_mps class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: a_ : str = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy' ) a_ : Any = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' ) a_ : Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) a_ : Optional[Any] = pipe.to('cuda' ) a_ : Any = 'Spiderman is surfing' a_ : List[Any] = torch.Generator(device='cpu' ).manual_seed(0 ) a_ : Optional[Any] = pipe(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2_5 , output_type='pt' ).frames a_ : str = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5E-2 def SCREAMING_SNAKE_CASE ( self : Any ) -> Any: a_ : Dict = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy' ) a_ : Tuple = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' ) a_ : Tuple = pipe.to('cuda' ) a_ : Any = 'Spiderman is surfing' a_ : List[str] = torch.Generator(device='cpu' ).manual_seed(0 ) a_ : List[Any] = pipe(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type='pt' ).frames a_ : List[str] = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5E-2
import numpy as np import torch import tqdm from ...models.unet_ad import UNetaDModel from ...pipelines import DiffusionPipeline from ...utils import randn_tensor from ...utils.dummy_pt_objects import DDPMScheduler class SCREAMING_SNAKE_CASE__ ( lowercase__ ): def __init__( self : str , SCREAMING_SNAKE_CASE__ : UNetaDModel , SCREAMING_SNAKE_CASE__ : UNetaDModel , SCREAMING_SNAKE_CASE__ : DDPMScheduler , SCREAMING_SNAKE_CASE__ : Dict , ) -> List[Any]: super().__init__() a_ : Tuple = value_function a_ : List[str] = unet a_ : Optional[int] = scheduler a_ : str = env a_ : str = env.get_dataset() a_ : Dict = {} for key in self.data.keys(): try: a_ : Any = self.data[key].mean() except: # noqa: E722 pass a_ : List[Any] = {} for key in self.data.keys(): try: a_ : int = self.data[key].std() except: # noqa: E722 pass a_ : int = env.observation_space.shape[0] a_ : Tuple = env.action_space.shape[0] def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Tuple: return (x_in - self.means[key]) / self.stds[key] def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int ) -> Any: return x_in * self.stds[key] + self.means[key] def SCREAMING_SNAKE_CASE ( self : str , SCREAMING_SNAKE_CASE__ : int ) -> Optional[int]: if type(SCREAMING_SNAKE_CASE__ ) is dict: return {k: self.to_torch(SCREAMING_SNAKE_CASE__ ) for k, v in x_in.items()} elif torch.is_tensor(SCREAMING_SNAKE_CASE__ ): return x_in.to(self.unet.device ) return torch.tensor(SCREAMING_SNAKE_CASE__ , device=self.unet.device ) def SCREAMING_SNAKE_CASE ( self : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple ) -> Any: for key, val in cond.items(): a_ : int = val.clone() return x_in def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Any: a_ : Optional[int] = x.shape[0] a_ : Optional[Any] = None for i in tqdm.tqdm(self.scheduler.timesteps ): # create batch of timesteps to pass into model a_ : Optional[int] = torch.full((batch_size,) , SCREAMING_SNAKE_CASE__ , device=self.unet.device , dtype=torch.long ) for _ in range(SCREAMING_SNAKE_CASE__ ): with torch.enable_grad(): x.requires_grad_() # permute to match dimension for pre-trained models a_ : Tuple = self.value_function(x.permute(0 , 2 , 1 ) , SCREAMING_SNAKE_CASE__ ).sample a_ : List[Any] = torch.autograd.grad([y.sum()] , [x] )[0] a_ : Union[str, Any] = self.scheduler._get_variance(SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = torch.exp(0.5 * posterior_variance ) a_ : List[Any] = model_std * grad a_ : List[str] = 0 a_ : Dict = x.detach() a_ : Any = x + scale * grad a_ : int = self.reset_xa(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.action_dim ) a_ : Tuple = self.unet(x.permute(0 , 2 , 1 ) , SCREAMING_SNAKE_CASE__ ).sample.permute(0 , 2 , 1 ) # TODO: verify deprecation of this kwarg a_ : List[str] = self.scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , predict_epsilon=SCREAMING_SNAKE_CASE__ )['prev_sample'] # apply conditions to the trajectory (set the initial state) a_ : Optional[Any] = self.reset_xa(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.action_dim ) a_ : Union[str, Any] = self.to_torch(SCREAMING_SNAKE_CASE__ ) return x, y def __call__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple=6_4 , 
SCREAMING_SNAKE_CASE__ : Optional[Any]=3_2 , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : Dict=0.1 ) -> List[Any]: # normalize the observations and create batch dimension a_ : List[str] = self.normalize(SCREAMING_SNAKE_CASE__ , 'observations' ) a_ : Dict = obs[None].repeat(SCREAMING_SNAKE_CASE__ , axis=0 ) a_ : Any = {0: self.to_torch(SCREAMING_SNAKE_CASE__ )} a_ : Optional[int] = (batch_size, planning_horizon, self.state_dim + self.action_dim) # generate initial noise and apply our conditions (to make the trajectories start at current state) a_ : List[str] = randn_tensor(SCREAMING_SNAKE_CASE__ , device=self.unet.device ) a_ : Dict = self.reset_xa(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.action_dim ) a_ : Union[str, Any] = self.to_torch(SCREAMING_SNAKE_CASE__ ) # run the diffusion process a_ , a_ : List[str] = self.run_diffusion(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # sort output trajectories by value a_ : List[str] = y.argsort(0 , descending=SCREAMING_SNAKE_CASE__ ).squeeze() a_ : int = x[sorted_idx] a_ : Optional[int] = sorted_values[:, :, : self.action_dim] a_ : Optional[Any] = actions.detach().cpu().numpy() a_ : Dict = self.de_normalize(SCREAMING_SNAKE_CASE__ , key='actions' ) # select the action with the highest value if y is not None: a_ : str = 0 else: # if we didn't run value guiding, select a random action a_ : List[Any] = np.random.randint(0 , SCREAMING_SNAKE_CASE__ ) a_ : Tuple = denorm_actions[selected_index, 0] return denorm_actions
import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class SCREAMING_SNAKE_CASE__ ( lowercase__ , unittest.TestCase ): # TODO: is there an appropriate internal test set? snake_case__ : Any = '''ssube/stable-diffusion-x4-upscaler-onnx''' def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : int=0 ) -> Tuple: a_ : Union[str, Any] = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ) a_ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict: a_ : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = self.get_dummy_inputs() a_ : int = pipe(**SCREAMING_SNAKE_CASE__ ).images a_ : Tuple = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 5_1_2, 5_1_2, 3) a_ : List[Any] = np.array( [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: a_ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) a_ : int = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : List[str] = self.get_dummy_inputs() a_ : List[str] = pipe(**SCREAMING_SNAKE_CASE__ ).images a_ : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) a_ : str = np.array( [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: a_ : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) a_ : Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = self.get_dummy_inputs() a_ : Dict = pipe(**SCREAMING_SNAKE_CASE__ ).images a_ : str = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) a_ : Optional[Any] = np.array( [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: a_ : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) a_ : int = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : 
Optional[Any] = self.get_dummy_inputs() a_ : Dict = pipe(**SCREAMING_SNAKE_CASE__ ).images a_ : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) a_ : int = np.array( [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: a_ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) a_ : Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = self.get_dummy_inputs() a_ : List[str] = pipe(**SCREAMING_SNAKE_CASE__ ).images a_ : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) a_ : Union[str, Any] = np.array( [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @property def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: a_ : List[str] = ort.SessionOptions() a_ : int = False return options def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple: a_ : str = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) a_ : int = init_image.resize((1_2_8, 1_2_8) ) # using the PNDM scheduler by default a_ : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained( 'ssube/stable-diffusion-x4-upscaler-onnx' , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Tuple = 'A fantasy landscape, trending on artstation' a_ : str = torch.manual_seed(0 ) a_ : List[str] = pipe( prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , num_inference_steps=1_0 , generator=SCREAMING_SNAKE_CASE__ , output_type='np' , ) a_ : Dict = output.images a_ : Any = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) a_ : str = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]: a_ : Dict = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) a_ : List[str] = init_image.resize((1_2_8, 1_2_8) ) a_ : Dict = LMSDiscreteScheduler.from_pretrained( 'ssube/stable-diffusion-x4-upscaler-onnx' , subfolder='scheduler' ) a_ : Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained( 'ssube/stable-diffusion-x4-upscaler-onnx' , scheduler=SCREAMING_SNAKE_CASE__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) a_ : Any = 'A fantasy landscape, trending on artstation' a_ : Tuple = torch.manual_seed(0 ) a_ : Optional[Any] = pipe( prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , 
guidance_scale=7.5 , num_inference_steps=2_0 , generator=SCREAMING_SNAKE_CASE__ , output_type='np' , ) a_ : str = output.images a_ : List[Any] = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) a_ : Tuple = np.array( [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    """Prompt for a message, key and mode, then print the translated message."""
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    """Shift each letter of ``message`` by the corresponding key letter."""
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)

    return "".join(translated)


if __name__ == "__main__":
    main()
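# Classic worked example (illustrative): the key "LEMON" applied to
# "ATTACKATDAWN" yields the textbook ciphertext, and decryption round-trips.
# Non-letters pass through unchanged and do not advance the key.
assert encrypt_message("LEMON", "ATTACKATDAWN") == "LXFOPVEFRNHR"
assert decrypt_message("LEMON", "LXFOPVEFRNHR") == "ATTACKATDAWN"
assert encrypt_message("LEMON", "ATTACK AT DAWN") == "LXFOPV EF RNHR"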
import inspect import re from hashlib import shaaaa from typing import Dict, List from .arrow import arrow from .audiofolder import audiofolder from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .sql import sql # noqa F401 from .text import text def SCREAMING_SNAKE_CASE_ ( __A : List[str] ) -> str: """simple docstring""" a_ : Tuple = [] for line in lines: a_ : Any = re.sub(R'#.*' , '' , __A ) # remove comments if line: filtered_lines.append(__A ) a_ : Tuple = '\n'.join(__A ) # Make a hash from all this code a_ : Tuple = full_str.encode('utf-8' ) return shaaaa(__A ).hexdigest() # get importable module names and hash for caching UpperCAmelCase_ : List[Any] = { 'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), 'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), 'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), 'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), 'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), 'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), 'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), 'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), } # Used to infer the module to use based on the data files extensions UpperCAmelCase_ : Dict = { '.csv': ('csv', {}), '.tsv': ('csv', {'sep': '\t'}), '.json': ('json', {}), '.jsonl': ('json', {}), '.parquet': ('parquet', {}), '.arrow': ('arrow', {}), '.txt': ('text', {}), } _EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) UpperCAmelCase_ : Optional[int] = {'imagefolder', 'audiofolder'} # Used to filter data files based on extensions given a module name UpperCAmelCase_ : Dict[str, List[str]] = {} for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) _MODULE_TO_EXTENSIONS["imagefolder"].append('.zip') _MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
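# Hedged sketch of how the extension table above can be used to pick a builder
# module for a list of data files, assuming the initial mapping is bound to
# `_EXTENSION_TO_MODULE` (as its update calls suggest).
# `infer_module_for_data_files` is a made-up helper name for illustration,
# not part of the public `datasets` API.
from pathlib import Path


def infer_module_for_data_files(data_files: list[str]) -> tuple[str, dict]:
    extensions = [Path(f).suffix for f in data_files]
    for ext in extensions:
        if ext in _EXTENSION_TO_MODULE:
            return _EXTENSION_TO_MODULE[ext]
    raise ValueError(f"Could not infer a dataset builder for extensions {extensions}")


print(infer_module_for_data_files(["train.csv", "test.csv"]))  # ('csv', {})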
import warnings

from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor


logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    # Deprecated alias kept for backwards compatibility; forwards everything to
    # the image processor.
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase_ : str = logging.get_logger(__name__) UpperCAmelCase_ : List[str] = { 'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json', 'YituTech/conv-bert-medium-small': ( 'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json' ), 'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json', # See all ConvBERT models at https://huggingface.co/models?filter=convbert } class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Optional[int] = '''convbert''' def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int]=3_0_5_2_2 , SCREAMING_SNAKE_CASE__ : Dict=7_6_8 , SCREAMING_SNAKE_CASE__ : Optional[int]=1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE__ : str=3_0_7_2 , SCREAMING_SNAKE_CASE__ : Dict="gelu" , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=5_1_2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=2 , SCREAMING_SNAKE_CASE__ : List[Any]=0.02 , SCREAMING_SNAKE_CASE__ : Any=1E-12 , SCREAMING_SNAKE_CASE__ : int=1 , SCREAMING_SNAKE_CASE__ : int=0 , SCREAMING_SNAKE_CASE__ : Optional[int]=2 , SCREAMING_SNAKE_CASE__ : Optional[int]=7_6_8 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : List[Any]=9 , SCREAMING_SNAKE_CASE__ : List[Any]=1 , SCREAMING_SNAKE_CASE__ : Dict=None , **SCREAMING_SNAKE_CASE__ : List[Any] , ) -> Any: super().__init__( pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) a_ : Tuple = vocab_size a_ : List[str] = hidden_size a_ : List[str] = num_hidden_layers a_ : Dict = num_attention_heads a_ : Optional[int] = intermediate_size a_ : int = hidden_act a_ : Dict = hidden_dropout_prob a_ : int = attention_probs_dropout_prob a_ : str = max_position_embeddings a_ : List[str] = type_vocab_size a_ : List[str] = initializer_range a_ : Tuple = layer_norm_eps a_ : Optional[int] = embedding_size a_ : List[Any] = head_ratio a_ : List[Any] = conv_kernel_size a_ : Tuple = num_groups a_ : Tuple = classifier_dropout class SCREAMING_SNAKE_CASE__ ( lowercase__ ): @property def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": a_ : Tuple = {0: 'batch', 1: 'choice', 2: 'sequence'} else: a_ : List[str] = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ] )
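# Hedged usage sketch: build a small ConvBERT configuration and read back the
# ConvBERT-specific fields (head_ratio, conv_kernel_size, num_groups). The
# overridden sizes are illustrative only.
from transformers import ConvBertConfig

config = ConvBertConfig(hidden_size=256, num_attention_heads=4, num_hidden_layers=2)
print(config.head_ratio, config.conv_kernel_size, config.num_groups)  # defaults: 2 9 1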
from __future__ import annotations


def max_sum_in_array(array: list[int], k: int) -> int:
    """Return the maximum sum of any ``k`` consecutive elements of ``array``."""
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod
    from random import randint

    testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
    print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}")
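# Worked example (hand-checked): with a window of size 4 the best window is
# [4, 2, 10, 23], whose sum is 39. The sliding update keeps this O(n) instead
# of recomputing each window sum from scratch.
assert max_sum_in_array([1, 4, 2, 10, 23, 3, 1, 0, 20], 4) == 39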
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE__ : def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str=1_3 , SCREAMING_SNAKE_CASE__ : Optional[int]=7 , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : str=9_9 , SCREAMING_SNAKE_CASE__ : str=2_4 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=6 , SCREAMING_SNAKE_CASE__ : Optional[int]=3_7 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=5_1_2 , SCREAMING_SNAKE_CASE__ : List[str]=1_6 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : int=0.02 , SCREAMING_SNAKE_CASE__ : Optional[Any]=3 , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Tuple=1_0_0_0 , ) -> str: a_ : Optional[Any] = parent a_ : List[str] = batch_size a_ : List[str] = seq_length a_ : str = is_training a_ : str = use_input_mask a_ : int = use_token_type_ids a_ : List[str] = use_labels a_ : Optional[int] = vocab_size a_ : Any = hidden_size a_ : int = num_hidden_layers a_ : List[str] = num_attention_heads a_ : str = intermediate_size a_ : Union[str, Any] = hidden_act a_ : List[str] = hidden_dropout_prob a_ : int = attention_probs_dropout_prob a_ : int = max_position_embeddings a_ : Tuple = type_vocab_size a_ : Optional[Any] = type_sequence_label_size a_ : Tuple = initializer_range a_ : Dict = num_labels a_ : str = scope a_ : Optional[int] = range_bbox def SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: a_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a_ : Any = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: a_ : int = bbox[i, j, 3] a_ : str = bbox[i, j, 1] a_ : List[str] = t if bbox[i, j, 2] < bbox[i, j, 0]: a_ : Tuple = bbox[i, j, 2] a_ : List[str] = bbox[i, j, 0] a_ : Union[str, Any] = t a_ : List[Any] = None if self.use_input_mask: a_ : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) a_ : List[Any] = None if self.use_token_type_ids: a_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a_ : int = None a_ : Tuple = None if self.use_labels: a_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a_ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a_ : Optional[int] = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def SCREAMING_SNAKE_CASE ( self : Dict ) -> int: return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> str: a_ : Any = LiltModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : Any = model(SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = model(SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = model(SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> int: a_ : Any = self.num_labels a_ : str = LiltForTokenClassification(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : str = model( SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> str: a_ : Union[str, Any] = LiltForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : List[str] = model( SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , start_positions=SCREAMING_SNAKE_CASE__ , end_positions=SCREAMING_SNAKE_CASE__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: a_ : int = self.prepare_config_and_inputs() ( ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ) : List[Any] = config_and_inputs a_ : Optional[int] = { 'input_ids': input_ids, 'bbox': bbox, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ): snake_case__ : Union[str, Any] = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) snake_case__ : str = ( 
{ '''feature-extraction''': LiltModel, '''question-answering''': LiltForQuestionAnswering, '''text-classification''': LiltForSequenceClassification, '''token-classification''': LiltForTokenClassification, '''zero-shot''': LiltForSequenceClassification, } if is_torch_available() else {} ) snake_case__ : List[str] = False snake_case__ : str = False def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int ) -> int: return True def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: a_ : str = LiltModelTester(self ) a_ : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=3_7 ) def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]: self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: a_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: a_ : Tuple = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: a_ : List[str] = type self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: a_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]: a_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE__ ) @slow def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a_ : List[Any] = LiltModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) @require_torch @slow class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]: a_ : List[str] = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(SCREAMING_SNAKE_CASE__ ) a_ : str = torch.tensor([[1, 2]] , device=SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=SCREAMING_SNAKE_CASE__ ) # forward pass with torch.no_grad(): a_ : str = model(input_ids=SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = torch.Size([1, 2, 7_6_8] ) a_ : int = torch.tensor( [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=SCREAMING_SNAKE_CASE__ , ) self.assertTrue(outputs.last_hidden_state.shape , SCREAMING_SNAKE_CASE__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) )
UpperCAmelCase_ : Optional[int] = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] UpperCAmelCase_ : str = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] UpperCAmelCase_ : str = { 0: 'Sunday', 1: 'Monday', 2: 'Tuesday', 3: 'Wednesday', 4: 'Thursday', 5: 'Friday', 6: 'Saturday', } def SCREAMING_SNAKE_CASE_ ( __A : int , __A : int , __A : int ) -> str: """simple docstring""" assert len(str(__A ) ) > 2, "year should be in YYYY format" assert 1 <= month <= 12, "month should be between 1 to 12" assert 1 <= day <= 31, "day should be between 1 to 31" # Doomsday algorithm: a_ : List[str] = year // 1_00 a_ : Optional[int] = (5 * (century % 4) + 2) % 7 a_ : List[str] = year % 1_00 a_ : str = centurian % 12 a_ : List[str] = ( (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor ) % 7 a_ : Any = ( DOOMSDAY_NOT_LEAP[month - 1] if (year % 4 != 0) or (centurian == 0 and (year % 4_00) == 0) else DOOMSDAY_LEAP[month - 1] ) a_ : Any = (dooms_day + day - day_anchor) % 7 return WEEK_DAY_NAMES[week_day] if __name__ == "__main__": import doctest doctest.testmod()
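# Illustrative cross-check sketch: audit the weekday converter above against
# the standard library. The name `get_week_day` is an assumption for the
# function defined above (its original name was stripped); adjust it to the
# actual definition before running the comparison.
import datetime


def stdlib_week_day(year: int, month: int, day: int) -> str:
    return datetime.date(year, month, day).strftime("%A")


# e.g. stdlib_week_day(2000, 1, 1) == "Saturday"
# audit idea: for a sample of dates, compare get_week_day(y, m, d) with stdlib_week_day(y, m, d)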
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class SCREAMING_SNAKE_CASE__ : def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple=1_3 , SCREAMING_SNAKE_CASE__ : str=7 , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=9_9 , SCREAMING_SNAKE_CASE__ : Optional[Any]=3_2 , SCREAMING_SNAKE_CASE__ : List[str]=5 , SCREAMING_SNAKE_CASE__ : List[Any]=4 , SCREAMING_SNAKE_CASE__ : Tuple=3_7 , SCREAMING_SNAKE_CASE__ : Any="gelu" , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : int=5_1_2 , SCREAMING_SNAKE_CASE__ : int=1_6 , SCREAMING_SNAKE_CASE__ : Optional[int]=2 , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE__ : Optional[Any]=4 , SCREAMING_SNAKE_CASE__ : Optional[int]=None , ) -> Any: a_ : Tuple = parent a_ : int = batch_size a_ : Tuple = seq_length a_ : List[Any] = is_training a_ : List[str] = use_token_type_ids a_ : Dict = use_labels a_ : Any = vocab_size a_ : List[str] = hidden_size a_ : Tuple = num_hidden_layers a_ : List[Any] = num_attention_heads a_ : Dict = intermediate_size a_ : Any = hidden_act a_ : List[str] = hidden_dropout_prob a_ : Tuple = attention_probs_dropout_prob a_ : Optional[Any] = max_position_embeddings a_ : List[Any] = type_vocab_size a_ : int = type_sequence_label_size a_ : List[Any] = initializer_range a_ : List[str] = num_labels a_ : Union[str, Any] = num_choices a_ : str = scope a_ : Tuple = self.vocab_size - 1 def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: a_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a_ : Any = None if self.use_token_type_ids: a_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a_ : List[Any] = None a_ : Union[str, Any] = None a_ : List[Any] = None if self.use_labels: a_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a_ : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) a_ : Union[str, Any] = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) a_ : List[str] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , *SCREAMING_SNAKE_CASE__ : Tuple ) -> Union[str, Any]: a_ : Dict = OpenAIGPTModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : str = 
model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , head_mask=SCREAMING_SNAKE_CASE__ ) a_ : Dict = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ ) a_ : Dict = model(SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Any: a_ : str = OpenAIGPTLMHeadModel(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : Optional[int] = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict: a_ : int = OpenAIGPTDoubleHeadsModel(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : str = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : str ) -> List[str]: a_ : Any = self.num_labels a_ : Dict = OpenAIGPTForSequenceClassification(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a_ : Any = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: a_ : Optional[Any] = self.prepare_config_and_inputs() ( ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ) : Optional[Any] = config_and_inputs a_ : Optional[int] = { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask, } return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ): snake_case__ : Tuple = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) snake_case__ : List[str] = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly snake_case__ : Dict = ( { '''feature-extraction''': OpenAIGPTModel, '''text-classification''': OpenAIGPTForSequenceClassification, '''text-generation''': OpenAIGPTLMHeadModel, '''zero-shot''': OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] ) -> Dict: if 
pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any=False ) -> List[str]: a_ : str = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": a_ : Optional[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ , ) a_ : str = inputs_dict['labels'] a_ : Optional[int] = inputs_dict['labels'] a_ : Optional[int] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ , ) a_ : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ ) return inputs_dict def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: a_ : str = OpenAIGPTModelTester(self ) a_ : int = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , n_embd=3_7 ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: a_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: a_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]: a_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: a_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*SCREAMING_SNAKE_CASE__ ) @slow def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str: for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a_ : str = OpenAIGPTModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE ( self : Dict ) -> int: a_ : Dict = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' ) model.to(SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ ) # the president is a_ : Tuple = [ 4_8_1, 4_7_3_5, 5_4_4, 2_4_6, 9_6_3, 8_7_0, 7_6_2, 2_3_9, 2_4_4, 4_0_4_7_7, 2_4_4, 2_4_9, 7_1_9, 8_8_1, 4_8_7, 5_4_4, 2_4_0, 2_4_4, 6_0_3, 4_8_1, ] # the president is a very good man. " \n " i\'m sure he is, " said the a_ : Dict = model.generate(SCREAMING_SNAKE_CASE__ , do_sample=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(output_ids[0].tolist() , SCREAMING_SNAKE_CASE__ )
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Tuple = ['''image_processor''', '''tokenizer'''] snake_case__ : Union[str, Any] = '''CLIPImageProcessor''' snake_case__ : Dict = ('''CLIPTokenizer''', '''CLIPTokenizerFast''') def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : List[str]=None , **SCREAMING_SNAKE_CASE__ : int ) -> Any: a_ : List[Any] = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , SCREAMING_SNAKE_CASE__ , ) a_ : Tuple = kwargs.pop('feature_extractor' ) a_ : Tuple = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __call__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , **SCREAMING_SNAKE_CASE__ : str ) -> Optional[Any]: if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be none.' ) if text is not None: a_ : List[str] = self.tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) if images is not None: a_ : Dict = self.image_processor(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) if text is not None and images is not None: a_ : Dict = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE__ ) , tensor_type=SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Any , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[Any]: return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[Any]: return self.tokenizer.decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) @property def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: a_ : str = self.tokenizer.model_input_names a_ : Tuple = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def SCREAMING_SNAKE_CASE ( self : str ) -> str: warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , SCREAMING_SNAKE_CASE__ , ) return self.image_processor_class @property def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , SCREAMING_SNAKE_CASE__ , ) return self.image_processor
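# Hedged usage sketch for the processor above (it ships in transformers as CLIPProcessor); the checkpoint
# name and the blank PIL image are illustrative assumptions. The processor fans `text` out to the tokenizer
# and `images` out to the image processor, then merges the two results into one encoding.
from PIL import Image

from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32')  # illustrative checkpoint choice
image = Image.new('RGB', (224, 224))  # stand-in image
inputs = processor(text=['a photo of a cat'], images=image, return_tensors='pt', padding=True)
print(sorted(inputs.keys()))  # expect input_ids, attention_mask and pixel_values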
import copy from typing import Dict, List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING UpperCAmelCase_ : Optional[int] = { 'facebook/mask2former-swin-small-coco-instance': ( 'https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json' ) # See all Mask2Former models at https://huggingface.co/models?filter=mask2former } UpperCAmelCase_ : List[str] = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Any = '''mask2former''' snake_case__ : Any = ['''swin'''] snake_case__ : str = {'''hidden_size''': '''hidden_dim'''} def __init__( self : Any , SCREAMING_SNAKE_CASE__ : Optional[Dict] = None , SCREAMING_SNAKE_CASE__ : int = 2_5_6 , SCREAMING_SNAKE_CASE__ : int = 2_5_6 , SCREAMING_SNAKE_CASE__ : int = 2_5_6 , SCREAMING_SNAKE_CASE__ : int = 1_0_2_4 , SCREAMING_SNAKE_CASE__ : str = "relu" , SCREAMING_SNAKE_CASE__ : int = 6 , SCREAMING_SNAKE_CASE__ : int = 1_0 , SCREAMING_SNAKE_CASE__ : int = 8 , SCREAMING_SNAKE_CASE__ : float = 0.0 , SCREAMING_SNAKE_CASE__ : int = 2_0_4_8 , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : int = 4 , SCREAMING_SNAKE_CASE__ : int = 2_5_5 , SCREAMING_SNAKE_CASE__ : int = 1_0_0 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 2.0 , SCREAMING_SNAKE_CASE__ : float = 5.0 , SCREAMING_SNAKE_CASE__ : float = 5.0 , SCREAMING_SNAKE_CASE__ : int = 1_2_5_4_4 , SCREAMING_SNAKE_CASE__ : float = 3.0 , SCREAMING_SNAKE_CASE__ : float = 0.75 , SCREAMING_SNAKE_CASE__ : float = 0.02 , SCREAMING_SNAKE_CASE__ : float = 1.0 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : List[int] = [4, 8, 1_6, 3_2] , SCREAMING_SNAKE_CASE__ : bool = None , **SCREAMING_SNAKE_CASE__ : int , ) -> List[Any]: if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' ) a_ : Dict = CONFIG_MAPPING['swin']( image_size=2_2_4 , in_channels=3 , patch_size=4 , embed_dim=9_6 , depths=[2, 2, 1_8, 2] , num_heads=[3, 6, 1_2, 2_4] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=SCREAMING_SNAKE_CASE__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): a_ : Any = backbone_config.pop('model_type' ) a_ : Optional[Any] = CONFIG_MAPPING[backbone_model_type] a_ : List[str] = config_class.from_dict(SCREAMING_SNAKE_CASE__ ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( F"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. 
""" F"""Supported model types: {",".join(self.backbones_supported )}""" ) a_ : Dict = backbone_config a_ : List[str] = feature_size a_ : List[str] = mask_feature_size a_ : int = hidden_dim a_ : Dict = encoder_feedforward_dim a_ : str = activation_function a_ : List[str] = encoder_layers a_ : List[str] = decoder_layers a_ : Dict = num_attention_heads a_ : str = dropout a_ : Tuple = dim_feedforward a_ : List[str] = pre_norm a_ : Optional[int] = enforce_input_projection a_ : Any = common_stride a_ : Optional[int] = ignore_value a_ : int = num_queries a_ : Tuple = no_object_weight a_ : Dict = class_weight a_ : Optional[int] = mask_weight a_ : Optional[int] = dice_weight a_ : str = train_num_points a_ : List[str] = oversample_ratio a_ : List[Any] = importance_sample_ratio a_ : Any = init_std a_ : Union[str, Any] = init_xavier_std a_ : Union[str, Any] = use_auxiliary_loss a_ : Dict = feature_strides a_ : List[str] = output_auxiliary_logits a_ : Dict = decoder_layers super().__init__(**SCREAMING_SNAKE_CASE__ ) @classmethod def SCREAMING_SNAKE_CASE ( cls : str , SCREAMING_SNAKE_CASE__ : PretrainedConfig , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> List[Any]: return cls( backbone_config=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict[str, any]: a_ : Optional[int] = copy.deepcopy(self.__dict__ ) a_ : List[Any] = self.backbone_config.to_dict() a_ : Optional[Any] = self.__class__.model_type return output
UpperCAmelCase_ : Optional[int] = { 'Pillow': 'Pillow<10.0.0', 'accelerate': 'accelerate>=0.20.3', 'av': 'av==9.2.0', 'beautifulsoup4': 'beautifulsoup4', 'black': 'black~=23.1', 'codecarbon': 'codecarbon==1.2.0', 'cookiecutter': 'cookiecutter==1.7.3', 'dataclasses': 'dataclasses', 'datasets': 'datasets!=2.5.0', 'decord': 'decord==0.6.0', 'deepspeed': 'deepspeed>=0.9.3', 'diffusers': 'diffusers', 'dill': 'dill<0.3.5', 'evaluate': 'evaluate>=0.2.0', 'fairscale': 'fairscale>0.3', 'faiss-cpu': 'faiss-cpu', 'fastapi': 'fastapi', 'filelock': 'filelock', 'flax': 'flax>=0.4.1,<=0.7.0', 'ftfy': 'ftfy', 'fugashi': 'fugashi>=1.0', 'GitPython': 'GitPython<3.1.19', 'hf-doc-builder': 'hf-doc-builder>=0.3.0', 'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0', 'importlib_metadata': 'importlib_metadata', 'ipadic': 'ipadic>=1.0.0,<2.0', 'isort': 'isort>=5.5.4', 'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13', 'jaxlib': 'jaxlib>=0.1.65,<=0.4.13', 'jieba': 'jieba', 'kenlm': 'kenlm', 'keras-nlp': 'keras-nlp>=0.3.1', 'librosa': 'librosa', 'nltk': 'nltk', 'natten': 'natten>=0.14.6', 'numpy': 'numpy>=1.17', 'onnxconverter-common': 'onnxconverter-common', 'onnxruntime-tools': 'onnxruntime-tools>=1.4.2', 'onnxruntime': 'onnxruntime>=1.4.0', 'opencv-python': 'opencv-python', 'optuna': 'optuna', 'optax': 'optax>=0.0.8,<=0.1.4', 'packaging': 'packaging>=20.0', 'parameterized': 'parameterized', 'phonemizer': 'phonemizer', 'protobuf': 'protobuf', 'psutil': 'psutil', 'pyyaml': 'pyyaml>=5.1', 'pydantic': 'pydantic<2', 'pytest': 'pytest>=7.2.0', 'pytest-timeout': 'pytest-timeout', 'pytest-xdist': 'pytest-xdist', 'python': 'python>=3.8.0', 'ray[tune]': 'ray[tune]', 'regex': 'regex!=2019.12.17', 'requests': 'requests', 'rhoknp': 'rhoknp>=1.1.0,<1.3.1', 'rjieba': 'rjieba', 'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1', 'ruff': 'ruff>=0.0.241,<=0.0.259', 'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0', 'sacremoses': 'sacremoses', 'safetensors': 'safetensors>=0.3.1', 'sagemaker': 'sagemaker>=2.31.0', 'scikit-learn': 'scikit-learn', 'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92', 'sigopt': 'sigopt', 'starlette': 'starlette', 'sudachipy': 'sudachipy>=0.6.6', 'sudachidict_core': 'sudachidict_core>=20220729', 'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14', 'tensorflow': 'tensorflow>=2.6,<2.14', 'tensorflow-text': 'tensorflow-text<2.14', 'tf2onnx': 'tf2onnx', 'timeout-decorator': 'timeout-decorator', 'timm': 'timm', 'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14', 'torch': 'torch>=1.9,!=1.12.0', 'torchaudio': 'torchaudio', 'torchvision': 'torchvision', 'pyctcdecode': 'pyctcdecode>=0.4.0', 'tqdm': 'tqdm>=4.27', 'unidic': 'unidic>=1.0.2', 'unidic_lite': 'unidic_lite>=1.0.7', 'urllib3': 'urllib3<2.0.0', 'uvicorn': 'uvicorn', }
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ : Dict = logging.get_logger(__name__) UpperCAmelCase_ : Union[str, Any] = { 'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json', } class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : List[str] = '''switch_transformers''' snake_case__ : Optional[int] = ['''past_key_values'''] snake_case__ : Optional[Any] = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[int]=3_2_1_2_8 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=7_6_8 , SCREAMING_SNAKE_CASE__ : Optional[Any]=6_4 , SCREAMING_SNAKE_CASE__ : List[str]=2_0_4_8 , SCREAMING_SNAKE_CASE__ : Dict=6_4 , SCREAMING_SNAKE_CASE__ : List[Any]=1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE__ : str=3 , SCREAMING_SNAKE_CASE__ : Tuple=1_2 , SCREAMING_SNAKE_CASE__ : Tuple=8 , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.01 , SCREAMING_SNAKE_CASE__ : str="float32" , SCREAMING_SNAKE_CASE__ : str=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3_2 , SCREAMING_SNAKE_CASE__ : Dict=1_2_8 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Dict=1E-6 , SCREAMING_SNAKE_CASE__ : Dict=0.001 , SCREAMING_SNAKE_CASE__ : Any=0.001 , SCREAMING_SNAKE_CASE__ : Optional[int]=1.0 , SCREAMING_SNAKE_CASE__ : Any="relu" , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : Optional[int]=1 , **SCREAMING_SNAKE_CASE__ : Dict , ) -> Optional[Any]: a_ : Optional[int] = vocab_size a_ : List[str] = d_model a_ : Tuple = d_kv a_ : Optional[Any] = d_ff a_ : List[Any] = num_sparse_encoder_layers a_ : Any = num_layers a_ : str = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry a_ : List[Any] = num_sparse_decoder_layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_encoder_layers > 0: a_ : Optional[int] = self.num_layers // self.num_sparse_encoder_layers else: a_ : List[Any] = self.num_layers # HACK: this will create 0 sparse layers # This tells us, each how many encoder layer we'll have to set a sparse layer. 
if self.num_sparse_decoder_layers > 0: a_ : Union[str, Any] = self.num_decoder_layers // self.num_sparse_decoder_layers else: a_ : List[str] = self.num_decoder_layers # HACK: this will create 0 sparse layers a_ : Dict = num_heads a_ : str = num_experts a_ : Any = expert_capacity a_ : List[Any] = router_bias a_ : str = router_jitter_noise if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(F"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" ) a_ : Optional[int] = router_dtype a_ : int = router_ignore_padding_tokens a_ : Any = relative_attention_num_buckets a_ : List[str] = relative_attention_max_distance a_ : Optional[Any] = dropout_rate a_ : Tuple = layer_norm_epsilon a_ : Dict = initializer_factor a_ : Any = feed_forward_proj a_ : Tuple = use_cache a_ : str = add_router_probs a_ : Optional[int] = router_z_loss_coef a_ : List[str] = router_aux_loss_coef a_ : int = self.feed_forward_proj.split('-' ) a_ : int = act_info[-1] a_ : Optional[int] = act_info[0] == 'gated' if len(SCREAMING_SNAKE_CASE__ ) > 1 and act_info[0] != "gated" or len(SCREAMING_SNAKE_CASE__ ) > 2: raise ValueError( F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ' '\'gated-gelu\' or \'relu\'' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": a_ : Any = 'gelu_new' super().__init__( pad_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
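# Worked example of the sparse-layer spacing computed above (numbers are illustrative, not from the file):
# with 12 encoder layers and 3 sparse encoder layers, every 12 // 3 = 4th encoder block becomes an MoE layer.
num_layers = 12
num_sparse_encoder_layers = 3
encoder_sparse_step = num_layers // num_sparse_encoder_layers if num_sparse_encoder_layers > 0 else num_layers
assert encoder_sparse_step == 4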
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__) UpperCAmelCase_ : List[Any] = { 'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json', } class SCREAMING_SNAKE_CASE__ ( lowercase__ , lowercase__ ): snake_case__ : Optional[int] = '''bit''' snake_case__ : Optional[Any] = ['''preactivation''', '''bottleneck'''] snake_case__ : Tuple = ['''SAME''', '''VALID'''] def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : List[Any]=3 , SCREAMING_SNAKE_CASE__ : Optional[int]=6_4 , SCREAMING_SNAKE_CASE__ : Optional[int]=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , SCREAMING_SNAKE_CASE__ : Optional[Any]=[3, 4, 6, 3] , SCREAMING_SNAKE_CASE__ : Optional[Any]="preactivation" , SCREAMING_SNAKE_CASE__ : Tuple="relu" , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : Dict=3_2 , SCREAMING_SNAKE_CASE__ : Tuple=0.0 , SCREAMING_SNAKE_CASE__ : Any=False , SCREAMING_SNAKE_CASE__ : Tuple=3_2 , SCREAMING_SNAKE_CASE__ : str=1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , **SCREAMING_SNAKE_CASE__ : str , ) -> str: super().__init__(**SCREAMING_SNAKE_CASE__ ) if layer_type not in self.layer_types: raise ValueError(F"""layer_type={layer_type} is not one of {",".join(self.layer_types )}""" ) if global_padding is not None: if global_padding.upper() in self.supported_padding: a_ : Any = global_padding.upper() else: raise ValueError(F"""Padding strategy {global_padding} not supported""" ) a_ : Optional[Any] = num_channels a_ : List[Any] = embedding_size a_ : Union[str, Any] = hidden_sizes a_ : List[str] = depths a_ : Any = layer_type a_ : Optional[int] = hidden_act a_ : Tuple = global_padding a_ : List[Any] = num_groups a_ : List[str] = drop_path_rate a_ : List[Any] = embedding_dynamic_padding a_ : int = output_stride a_ : str = width_factor a_ : Dict = ['stem'] + [F"""stage{idx}""" for idx in range(1 , len(SCREAMING_SNAKE_CASE__ ) + 1 )] a_ , a_ : List[Any] = get_aligned_output_features_output_indices( out_features=SCREAMING_SNAKE_CASE__ , out_indices=SCREAMING_SNAKE_CASE__ , stage_names=self.stage_names )
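# Hedged usage sketch for the config above (exposed in transformers as BitConfig); the requested stages are
# illustrative. `out_features` and `out_indices` are aligned against `stage_names` by the helper called at
# the end of __init__.
from transformers import BitConfig

cfg = BitConfig(out_features=['stage3', 'stage4'])
print(cfg.out_features, cfg.out_indices)  # ['stage3', 'stage4'] with the matching indices filled in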
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool UpperCAmelCase_ : Tuple = { 'Acehnese Arabic': 'ace_Arab', 'Acehnese Latin': 'ace_Latn', 'Mesopotamian Arabic': 'acm_Arab', 'Ta\'izzi-Adeni Arabic': 'acq_Arab', 'Tunisian Arabic': 'aeb_Arab', 'Afrikaans': 'afr_Latn', 'South Levantine Arabic': 'ajp_Arab', 'Akan': 'aka_Latn', 'Amharic': 'amh_Ethi', 'North Levantine Arabic': 'apc_Arab', 'Modern Standard Arabic': 'arb_Arab', 'Modern Standard Arabic Romanized': 'arb_Latn', 'Najdi Arabic': 'ars_Arab', 'Moroccan Arabic': 'ary_Arab', 'Egyptian Arabic': 'arz_Arab', 'Assamese': 'asm_Beng', 'Asturian': 'ast_Latn', 'Awadhi': 'awa_Deva', 'Central Aymara': 'ayr_Latn', 'South Azerbaijani': 'azb_Arab', 'North Azerbaijani': 'azj_Latn', 'Bashkir': 'bak_Cyrl', 'Bambara': 'bam_Latn', 'Balinese': 'ban_Latn', 'Belarusian': 'bel_Cyrl', 'Bemba': 'bem_Latn', 'Bengali': 'ben_Beng', 'Bhojpuri': 'bho_Deva', 'Banjar Arabic': 'bjn_Arab', 'Banjar Latin': 'bjn_Latn', 'Standard Tibetan': 'bod_Tibt', 'Bosnian': 'bos_Latn', 'Buginese': 'bug_Latn', 'Bulgarian': 'bul_Cyrl', 'Catalan': 'cat_Latn', 'Cebuano': 'ceb_Latn', 'Czech': 'ces_Latn', 'Chokwe': 'cjk_Latn', 'Central Kurdish': 'ckb_Arab', 'Crimean Tatar': 'crh_Latn', 'Welsh': 'cym_Latn', 'Danish': 'dan_Latn', 'German': 'deu_Latn', 'Southwestern Dinka': 'dik_Latn', 'Dyula': 'dyu_Latn', 'Dzongkha': 'dzo_Tibt', 'Greek': 'ell_Grek', 'English': 'eng_Latn', 'Esperanto': 'epo_Latn', 'Estonian': 'est_Latn', 'Basque': 'eus_Latn', 'Ewe': 'ewe_Latn', 'Faroese': 'fao_Latn', 'Fijian': 'fij_Latn', 'Finnish': 'fin_Latn', 'Fon': 'fon_Latn', 'French': 'fra_Latn', 'Friulian': 'fur_Latn', 'Nigerian Fulfulde': 'fuv_Latn', 'Scottish Gaelic': 'gla_Latn', 'Irish': 'gle_Latn', 'Galician': 'glg_Latn', 'Guarani': 'grn_Latn', 'Gujarati': 'guj_Gujr', 'Haitian Creole': 'hat_Latn', 'Hausa': 'hau_Latn', 'Hebrew': 'heb_Hebr', 'Hindi': 'hin_Deva', 'Chhattisgarhi': 'hne_Deva', 'Croatian': 'hrv_Latn', 'Hungarian': 'hun_Latn', 'Armenian': 'hye_Armn', 'Igbo': 'ibo_Latn', 'Ilocano': 'ilo_Latn', 'Indonesian': 'ind_Latn', 'Icelandic': 'isl_Latn', 'Italian': 'ita_Latn', 'Javanese': 'jav_Latn', 'Japanese': 'jpn_Jpan', 'Kabyle': 'kab_Latn', 'Jingpho': 'kac_Latn', 'Kamba': 'kam_Latn', 'Kannada': 'kan_Knda', 'Kashmiri Arabic': 'kas_Arab', 'Kashmiri Devanagari': 'kas_Deva', 'Georgian': 'kat_Geor', 'Central Kanuri Arabic': 'knc_Arab', 'Central Kanuri Latin': 'knc_Latn', 'Kazakh': 'kaz_Cyrl', 'Kabiyè': 'kbp_Latn', 'Kabuverdianu': 'kea_Latn', 'Khmer': 'khm_Khmr', 'Kikuyu': 'kik_Latn', 'Kinyarwanda': 'kin_Latn', 'Kyrgyz': 'kir_Cyrl', 'Kimbundu': 'kmb_Latn', 'Northern Kurdish': 'kmr_Latn', 'Kikongo': 'kon_Latn', 'Korean': 'kor_Hang', 'Lao': 'lao_Laoo', 'Ligurian': 'lij_Latn', 'Limburgish': 'lim_Latn', 'Lingala': 'lin_Latn', 'Lithuanian': 'lit_Latn', 'Lombard': 'lmo_Latn', 'Latgalian': 'ltg_Latn', 'Luxembourgish': 'ltz_Latn', 'Luba-Kasai': 'lua_Latn', 'Ganda': 'lug_Latn', 'Luo': 
'luo_Latn', 'Mizo': 'lus_Latn', 'Standard Latvian': 'lvs_Latn', 'Magahi': 'mag_Deva', 'Maithili': 'mai_Deva', 'Malayalam': 'mal_Mlym', 'Marathi': 'mar_Deva', 'Minangkabau Arabic ': 'min_Arab', 'Minangkabau Latin': 'min_Latn', 'Macedonian': 'mkd_Cyrl', 'Plateau Malagasy': 'plt_Latn', 'Maltese': 'mlt_Latn', 'Meitei Bengali': 'mni_Beng', 'Halh Mongolian': 'khk_Cyrl', 'Mossi': 'mos_Latn', 'Maori': 'mri_Latn', 'Burmese': 'mya_Mymr', 'Dutch': 'nld_Latn', 'Norwegian Nynorsk': 'nno_Latn', 'Norwegian Bokmål': 'nob_Latn', 'Nepali': 'npi_Deva', 'Northern Sotho': 'nso_Latn', 'Nuer': 'nus_Latn', 'Nyanja': 'nya_Latn', 'Occitan': 'oci_Latn', 'West Central Oromo': 'gaz_Latn', 'Odia': 'ory_Orya', 'Pangasinan': 'pag_Latn', 'Eastern Panjabi': 'pan_Guru', 'Papiamento': 'pap_Latn', 'Western Persian': 'pes_Arab', 'Polish': 'pol_Latn', 'Portuguese': 'por_Latn', 'Dari': 'prs_Arab', 'Southern Pashto': 'pbt_Arab', 'Ayacucho Quechua': 'quy_Latn', 'Romanian': 'ron_Latn', 'Rundi': 'run_Latn', 'Russian': 'rus_Cyrl', 'Sango': 'sag_Latn', 'Sanskrit': 'san_Deva', 'Santali': 'sat_Olck', 'Sicilian': 'scn_Latn', 'Shan': 'shn_Mymr', 'Sinhala': 'sin_Sinh', 'Slovak': 'slk_Latn', 'Slovenian': 'slv_Latn', 'Samoan': 'smo_Latn', 'Shona': 'sna_Latn', 'Sindhi': 'snd_Arab', 'Somali': 'som_Latn', 'Southern Sotho': 'sot_Latn', 'Spanish': 'spa_Latn', 'Tosk Albanian': 'als_Latn', 'Sardinian': 'srd_Latn', 'Serbian': 'srp_Cyrl', 'Swati': 'ssw_Latn', 'Sundanese': 'sun_Latn', 'Swedish': 'swe_Latn', 'Swahili': 'swh_Latn', 'Silesian': 'szl_Latn', 'Tamil': 'tam_Taml', 'Tatar': 'tat_Cyrl', 'Telugu': 'tel_Telu', 'Tajik': 'tgk_Cyrl', 'Tagalog': 'tgl_Latn', 'Thai': 'tha_Thai', 'Tigrinya': 'tir_Ethi', 'Tamasheq Latin': 'taq_Latn', 'Tamasheq Tifinagh': 'taq_Tfng', 'Tok Pisin': 'tpi_Latn', 'Tswana': 'tsn_Latn', 'Tsonga': 'tso_Latn', 'Turkmen': 'tuk_Latn', 'Tumbuka': 'tum_Latn', 'Turkish': 'tur_Latn', 'Twi': 'twi_Latn', 'Central Atlas Tamazight': 'tzm_Tfng', 'Uyghur': 'uig_Arab', 'Ukrainian': 'ukr_Cyrl', 'Umbundu': 'umb_Latn', 'Urdu': 'urd_Arab', 'Northern Uzbek': 'uzn_Latn', 'Venetian': 'vec_Latn', 'Vietnamese': 'vie_Latn', 'Waray': 'war_Latn', 'Wolof': 'wol_Latn', 'Xhosa': 'xho_Latn', 'Eastern Yiddish': 'ydd_Hebr', 'Yoruba': 'yor_Latn', 'Yue Chinese': 'yue_Hant', 'Chinese Simplified': 'zho_Hans', 'Chinese Traditional': 'zho_Hant', 'Standard Malay': 'zsm_Latn', 'Zulu': 'zul_Latn', } class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : str = '''facebook/nllb-200-distilled-600M''' snake_case__ : Union[str, Any] = ( '''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should ''' '''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, ''' '''which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in ''' '''plain English, such as \'Romanian\', or \'Albanian\'. 
It returns the text translated in `tgt_lang`.''' ) snake_case__ : Optional[Any] = '''translator''' snake_case__ : Tuple = AutoTokenizer snake_case__ : Union[str, Any] = AutoModelForSeqaSeqLM snake_case__ : Dict = LANGUAGE_CODES snake_case__ : str = ['''text''', '''text''', '''text'''] snake_case__ : Tuple = ['''text'''] def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Tuple: if src_lang not in self.lang_to_code: raise ValueError(F"""{src_lang} is not a supported language.""" ) if tgt_lang not in self.lang_to_code: raise ValueError(F"""{tgt_lang} is not a supported language.""" ) a_ : str = self.lang_to_code[src_lang] a_ : Any = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( SCREAMING_SNAKE_CASE__ , return_tensors='pt' , src_lang=SCREAMING_SNAKE_CASE__ , tgt_lang=SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : Tuple ) -> Any: return self.model.generate(**SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[str] ) -> Dict: return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
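# Hedged usage sketch for the translation tool above; `load_tool('translation')` and the exact call
# signature follow the transformers agents API as I understand it, so treat both as assumptions.
from transformers import load_tool

translator = load_tool('translation')
print(translator('Bonjour, comment allez-vous ?', src_lang='French', tgt_lang='English'))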
import operator


def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    """Sort `arr` with strand sort: repeatedly pull an increasing strand and merge it into `solution`."""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    # pull one increasing (or decreasing, when reverse=True) strand out of arr
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution


if __name__ == "__main__":
    assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
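# Worked example of how the strands form for [4, 3, 5, 1, 2] (names as restored above):
#   1st strand pulls the increasing run [4, 5]   -> remaining arr = [3, 1, 2], solution = [4, 5]
#   2nd strand pulls [3], merged into [4, 5]     -> remaining arr = [1, 2],    solution = [3, 4, 5]
#   3rd strand pulls [1, 2], merged              -> solution = [1, 2, 3, 4, 5]
#
#     >>> strand_sort([4, 3, 5, 1, 2])
#     [1, 2, 3, 4, 5]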
from ..utils import DummyObject, requires_backends class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase__ ): snake_case__ : str = ['''flax'''] def __init__( self : Tuple , *SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : Tuple ) -> List[Any]: requires_backends(self , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE ( cls : Any , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : List[str] ) -> str: requires_backends(cls , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE ( cls : Any , *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : Any ) -> int: requires_backends(cls , ['flax'] ) class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase__ ): snake_case__ : Any = ['''flax'''] def __init__( self : str , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[int]: requires_backends(self , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE ( cls : Any , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[Any]: requires_backends(cls , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE ( cls : Dict , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Dict ) -> List[Any]: requires_backends(cls , ['flax'] ) class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase__ ): snake_case__ : int = ['''flax'''] def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Union[str, Any]: requires_backends(self , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE ( cls : List[Any] , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Tuple: requires_backends(cls , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : List[str] ) -> Any: requires_backends(cls , ['flax'] ) class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase__ ): snake_case__ : Dict = ['''flax'''] def __init__( self : str , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Dict: requires_backends(self , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE ( cls : Any , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[int]: requires_backends(cls , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE ( cls : Dict , *SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : Dict ) -> Any: requires_backends(cls , ['flax'] ) class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase__ ): snake_case__ : List[Any] = ['''flax'''] def __init__( self : Dict , *SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : Dict ) -> List[Any]: requires_backends(self , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE ( cls : Any , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[Any]: requires_backends(cls , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[Any]: requires_backends(cls , ['flax'] ) class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase__ ): snake_case__ : Optional[Any] = ['''flax'''] def __init__( self : Any , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : int ) -> Tuple: requires_backends(self , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[int]: requires_backends(cls , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE ( cls : Tuple , 
*SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : Tuple ) -> Dict: requires_backends(cls , ['flax'] ) class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase__ ): snake_case__ : Tuple = ['''flax'''] def __init__( self : int , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Tuple: requires_backends(self , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE ( cls : Tuple , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> List[str]: requires_backends(cls , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE ( cls : List[str] , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Any ) -> List[Any]: requires_backends(cls , ['flax'] ) class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase__ ): snake_case__ : int = ['''flax'''] def __init__( self : List[str] , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Tuple: requires_backends(self , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE ( cls : int , *SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[str]: requires_backends(cls , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE ( cls : List[Any] , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : int ) -> List[str]: requires_backends(cls , ['flax'] ) class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase__ ): snake_case__ : Any = ['''flax'''] def __init__( self : Any , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Dict: requires_backends(self , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE ( cls : List[Any] , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[int]: requires_backends(cls , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE ( cls : Dict , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : List[str] ) -> Tuple: requires_backends(cls , ['flax'] ) class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase__ ): snake_case__ : Optional[Any] = ['''flax'''] def __init__( self : Any , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict: requires_backends(self , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE ( cls : Any , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any: requires_backends(cls , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE ( cls : List[Any] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Dict ) -> str: requires_backends(cls , ['flax'] ) class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase__ ): snake_case__ : Optional[int] = ['''flax'''] def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Tuple: requires_backends(self , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE ( cls : Any , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : str ) -> Union[str, Any]: requires_backends(cls , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE ( cls : Tuple , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : int ) -> Dict: requires_backends(cls , ['flax'] ) class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase__ ): snake_case__ : Any = ['''flax'''] def __init__( self : Tuple , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : str ) -> Union[str, Any]: requires_backends(self , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Tuple: requires_backends(cls , ['flax'] ) 
@classmethod def SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : List[str] ) -> List[str]: requires_backends(cls , ['flax'] ) class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase__ ): snake_case__ : List[str] = ['''flax'''] def __init__( self : Any , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Tuple ) -> int: requires_backends(self , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE ( cls : int , *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : Tuple ) -> List[Any]: requires_backends(cls , ['flax'] ) @classmethod def SCREAMING_SNAKE_CASE ( cls : Dict , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Union[str, Any]: requires_backends(cls , ['flax'] )
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Build sinusoidal timestep embeddings of shape (len(timesteps), embedding_dim)."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    # two-layer MLP that projects the sinusoidal embedding into the model's time-embedding space
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name='linear_1')(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name='linear_2')(temb)
        return temb


class FlaxTimesteps(nn.Module):
    # thin wrapper that turns integer timesteps into sinusoidal embeddings
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
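# Small shape-check sketch for the embedding helper above (names as restored; the values are illustrative):
# each timestep becomes one row, with the cos/sin halves concatenated along the feature axis.
import jax.numpy as jnp

timesteps = jnp.array([0, 10, 250, 999])
emb = get_sinusoidal_embeddings(timesteps, embedding_dim=32, flip_sin_to_cos=True)
assert emb.shape == (4, 32)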
import numpy as np import pandas as pd from sklearn.preprocessing import Normalizer from sklearn.svm import SVR from statsmodels.tsa.statespace.sarimax import SARIMAX def SCREAMING_SNAKE_CASE_ ( __A : list , __A : list , __A : list , __A : list , __A : list ) -> float: """simple docstring""" a_ : Dict = np.array([[1, item, train_mtch[i]] for i, item in enumerate(__A )] ) a_ : Union[str, Any] = np.array(__A ) a_ : Dict = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , __A ) ) , x.transpose() ) , __A ) return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] ) def SCREAMING_SNAKE_CASE_ ( __A : list , __A : list , __A : list ) -> float: """simple docstring""" a_ : Optional[Any] = (1, 2, 1) a_ : List[Any] = (1, 1, 0, 7) a_ : Optional[int] = SARIMAX( __A , exog=__A , order=__A , seasonal_order=__A ) a_ : Union[str, Any] = model.fit(disp=__A , maxiter=6_00 , method='nm' ) a_ : int = model_fit.predict(1 , len(__A ) , exog=[test_match] ) return result[0] def SCREAMING_SNAKE_CASE_ ( __A : list , __A : list , __A : list ) -> float: """simple docstring""" a_ : List[str] = SVR(kernel='rbf' , C=1 , gamma=0.1 , epsilon=0.1 ) regressor.fit(__A , __A ) a_ : Union[str, Any] = regressor.predict(__A ) return y_pred[0] def SCREAMING_SNAKE_CASE_ ( __A : list ) -> float: """simple docstring""" train_user.sort() a_ : str = np.percentile(__A , 25 ) a_ : Optional[Any] = np.percentile(__A , 75 ) a_ : Any = qa - qa a_ : Union[str, Any] = qa - (iqr * 0.1) return low_lim def SCREAMING_SNAKE_CASE_ ( __A : list , __A : float ) -> bool: """simple docstring""" a_ : Dict = 0 a_ : Optional[int] = 0 for i in list_vote: if i > actual_result: a_ : str = not_safe + 1 else: if abs(abs(__A ) - abs(__A ) ) <= 0.1: safe += 1 else: not_safe += 1 return safe > not_safe if __name__ == "__main__": # data_input_df = pd.read_csv("ex_data.csv", header=None) UpperCAmelCase_ : List[str] = [[1_8231, 0.0, 1], [2_2621, 1.0, 2], [1_5675, 0.0, 3], [2_3583, 1.0, 4]] UpperCAmelCase_ : Dict = pd.DataFrame( data_input, columns=['total_user', 'total_even', 'days'] ) UpperCAmelCase_ : int = Normalizer().fit_transform(data_input_df.values) # split data UpperCAmelCase_ : List[str] = normalize_df[:, 2].tolist() UpperCAmelCase_ : Dict = normalize_df[:, 0].tolist() UpperCAmelCase_ : List[Any] = normalize_df[:, 1].tolist() # for svr (input variable = total date and total match) UpperCAmelCase_ : int = normalize_df[:, [1, 2]].tolist() UpperCAmelCase_ : List[str] = x[: len(x) - 1] UpperCAmelCase_ : Any = x[len(x) - 1 :] # for linear regression & sarimax UpperCAmelCase_ : Optional[int] = total_date[: len(total_date) - 1] UpperCAmelCase_ : str = total_user[: len(total_user) - 1] UpperCAmelCase_ : List[Any] = total_match[: len(total_match) - 1] UpperCAmelCase_ : Optional[int] = total_date[len(total_date) - 1 :] UpperCAmelCase_ : Any = total_user[len(total_user) - 1 :] UpperCAmelCase_ : str = total_match[len(total_match) - 1 :] # voting system with forecasting UpperCAmelCase_ : Optional[Any] = [ linear_regression_prediction( trn_date, trn_user, trn_match, tst_date, tst_match ), sarimax_predictor(trn_user, trn_match, tst_match), support_vector_regressor(x_train, x_test, trn_user), ] # check the safety of today's data UpperCAmelCase_ : Optional[Any] = '' if data_safety_checker(res_vote, tst_user) else 'not ' print('Today\'s data is {not_str}safe.')
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__) UpperCAmelCase_ : List[str] = OrderedDict( [ # Base model mapping ('albert', 'FlaxAlbertModel'), ('bart', 'FlaxBartModel'), ('beit', 'FlaxBeitModel'), ('bert', 'FlaxBertModel'), ('big_bird', 'FlaxBigBirdModel'), ('blenderbot', 'FlaxBlenderbotModel'), ('blenderbot-small', 'FlaxBlenderbotSmallModel'), ('clip', 'FlaxCLIPModel'), ('distilbert', 'FlaxDistilBertModel'), ('electra', 'FlaxElectraModel'), ('gpt-sw3', 'FlaxGPT2Model'), ('gpt2', 'FlaxGPT2Model'), ('gpt_neo', 'FlaxGPTNeoModel'), ('gptj', 'FlaxGPTJModel'), ('longt5', 'FlaxLongT5Model'), ('marian', 'FlaxMarianModel'), ('mbart', 'FlaxMBartModel'), ('mt5', 'FlaxMT5Model'), ('opt', 'FlaxOPTModel'), ('pegasus', 'FlaxPegasusModel'), ('regnet', 'FlaxRegNetModel'), ('resnet', 'FlaxResNetModel'), ('roberta', 'FlaxRobertaModel'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'), ('roformer', 'FlaxRoFormerModel'), ('t5', 'FlaxT5Model'), ('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'), ('vit', 'FlaxViTModel'), ('wav2vec2', 'FlaxWav2Vec2Model'), ('whisper', 'FlaxWhisperModel'), ('xglm', 'FlaxXGLMModel'), ('xlm-roberta', 'FlaxXLMRobertaModel'), ] ) UpperCAmelCase_ : str = OrderedDict( [ # Model for pre-training mapping ('albert', 'FlaxAlbertForPreTraining'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForPreTraining'), ('big_bird', 'FlaxBigBirdForPreTraining'), ('electra', 'FlaxElectraForPreTraining'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('t5', 'FlaxT5ForConditionalGeneration'), ('wav2vec2', 'FlaxWav2Vec2ForPreTraining'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) UpperCAmelCase_ : Dict = OrderedDict( [ # Model for Masked LM mapping ('albert', 'FlaxAlbertForMaskedLM'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForMaskedLM'), ('big_bird', 'FlaxBigBirdForMaskedLM'), ('distilbert', 'FlaxDistilBertForMaskedLM'), ('electra', 'FlaxElectraForMaskedLM'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) UpperCAmelCase_ : Optional[Any] = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ('bart', 'FlaxBartForConditionalGeneration'), ('blenderbot', 'FlaxBlenderbotForConditionalGeneration'), ('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'), ('encoder-decoder', 'FlaxEncoderDecoderModel'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('marian', 'FlaxMarianMTModel'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('pegasus', 'FlaxPegasusForConditionalGeneration'), ('t5', 'FlaxT5ForConditionalGeneration'), ] ) UpperCAmelCase_ : List[str] = OrderedDict( [ # Model for Image-classsification ('beit', 'FlaxBeitForImageClassification'), ('regnet', 'FlaxRegNetForImageClassification'), ('resnet', 'FlaxResNetForImageClassification'), ('vit', 'FlaxViTForImageClassification'), ] ) 
UpperCAmelCase_ : int = OrderedDict( [ ('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'), ] ) UpperCAmelCase_ : List[str] = OrderedDict( [ # Model for Causal LM mapping ('bart', 'FlaxBartForCausalLM'), ('bert', 'FlaxBertForCausalLM'), ('big_bird', 'FlaxBigBirdForCausalLM'), ('electra', 'FlaxElectraForCausalLM'), ('gpt-sw3', 'FlaxGPT2LMHeadModel'), ('gpt2', 'FlaxGPT2LMHeadModel'), ('gpt_neo', 'FlaxGPTNeoForCausalLM'), ('gptj', 'FlaxGPTJForCausalLM'), ('opt', 'FlaxOPTForCausalLM'), ('roberta', 'FlaxRobertaForCausalLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'), ('xglm', 'FlaxXGLMForCausalLM'), ('xlm-roberta', 'FlaxXLMRobertaForCausalLM'), ] ) UpperCAmelCase_ : List[str] = OrderedDict( [ # Model for Sequence Classification mapping ('albert', 'FlaxAlbertForSequenceClassification'), ('bart', 'FlaxBartForSequenceClassification'), ('bert', 'FlaxBertForSequenceClassification'), ('big_bird', 'FlaxBigBirdForSequenceClassification'), ('distilbert', 'FlaxDistilBertForSequenceClassification'), ('electra', 'FlaxElectraForSequenceClassification'), ('mbart', 'FlaxMBartForSequenceClassification'), ('roberta', 'FlaxRobertaForSequenceClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'), ('roformer', 'FlaxRoFormerForSequenceClassification'), ('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'), ] ) UpperCAmelCase_ : List[str] = OrderedDict( [ # Model for Question Answering mapping ('albert', 'FlaxAlbertForQuestionAnswering'), ('bart', 'FlaxBartForQuestionAnswering'), ('bert', 'FlaxBertForQuestionAnswering'), ('big_bird', 'FlaxBigBirdForQuestionAnswering'), ('distilbert', 'FlaxDistilBertForQuestionAnswering'), ('electra', 'FlaxElectraForQuestionAnswering'), ('mbart', 'FlaxMBartForQuestionAnswering'), ('roberta', 'FlaxRobertaForQuestionAnswering'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'), ('roformer', 'FlaxRoFormerForQuestionAnswering'), ('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'), ] ) UpperCAmelCase_ : Union[str, Any] = OrderedDict( [ # Model for Token Classification mapping ('albert', 'FlaxAlbertForTokenClassification'), ('bert', 'FlaxBertForTokenClassification'), ('big_bird', 'FlaxBigBirdForTokenClassification'), ('distilbert', 'FlaxDistilBertForTokenClassification'), ('electra', 'FlaxElectraForTokenClassification'), ('roberta', 'FlaxRobertaForTokenClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'), ('roformer', 'FlaxRoFormerForTokenClassification'), ('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'), ] ) UpperCAmelCase_ : Dict = OrderedDict( [ # Model for Multiple Choice mapping ('albert', 'FlaxAlbertForMultipleChoice'), ('bert', 'FlaxBertForMultipleChoice'), ('big_bird', 'FlaxBigBirdForMultipleChoice'), ('distilbert', 'FlaxDistilBertForMultipleChoice'), ('electra', 'FlaxElectraForMultipleChoice'), ('roberta', 'FlaxRobertaForMultipleChoice'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'), ('roformer', 'FlaxRoFormerForMultipleChoice'), ('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'), ] ) UpperCAmelCase_ : List[str] = OrderedDict( [ ('bert', 'FlaxBertForNextSentencePrediction'), ] ) UpperCAmelCase_ : Dict = OrderedDict( [ ('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ] ) UpperCAmelCase_ : Union[str, Any] = OrderedDict( [ ('whisper', 'FlaxWhisperForAudioClassification'), ] ) UpperCAmelCase_ : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, 
FLAX_MODEL_MAPPING_NAMES) UpperCAmelCase_ : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) UpperCAmelCase_ : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) UpperCAmelCase_ : List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) UpperCAmelCase_ : int = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) UpperCAmelCase_ : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) UpperCAmelCase_ : Dict = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ : Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) UpperCAmelCase_ : List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ : int = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) UpperCAmelCase_ : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) UpperCAmelCase_ : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) UpperCAmelCase_ : Optional[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : List[Any] = FLAX_MODEL_MAPPING UpperCAmelCase_ : Tuple = auto_class_update(FlaxAutoModel) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Any = FLAX_MODEL_FOR_PRETRAINING_MAPPING UpperCAmelCase_ : Optional[Any] = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : List[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING UpperCAmelCase_ : Optional[Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Optional[Any] = FLAX_MODEL_FOR_MASKED_LM_MAPPING UpperCAmelCase_ : Union[str, Any] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Tuple = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING UpperCAmelCase_ : Optional[int] = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base' ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Tuple = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING UpperCAmelCase_ : Optional[Any] = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc='sequence classification' ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Tuple = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING UpperCAmelCase_ : str = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : List[str] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING UpperCAmelCase_ : Tuple = auto_class_update( FlaxAutoModelForTokenClassification, head_doc='token classification' ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Dict = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING UpperCAmelCase_ : int = 
auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Optional[int] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING UpperCAmelCase_ : Dict = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction' ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING UpperCAmelCase_ : str = auto_class_update( FlaxAutoModelForImageClassification, head_doc='image classification' ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Optional[Any] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase_ : Tuple = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): snake_case__ : Optional[int] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING UpperCAmelCase_ : Union[str, Any] = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling' )
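# Hedged usage sketch for the auto classes above; the checkpoint names are assumptions (any checkpoint
# with Flax weights works) and simply exercise the lazy mappings defined in this module.
from transformers import FlaxAutoModel, FlaxAutoModelForSequenceClassification

encoder = FlaxAutoModel.from_pretrained('bert-base-cased')  # resolves to FlaxBertModel via FLAX_MODEL_MAPPING
classifier = FlaxAutoModelForSequenceClassification.from_pretrained('roberta-base')  # -> FlaxRobertaForSequenceClassification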