code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
import os import sys import tempfile import torch from .state import AcceleratorState from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[Any] , UpperCAmelCase__ :Dict=() , UpperCAmelCase__ :Any=None , UpperCAmelCase__ :Union[str, Any]="no" , UpperCAmelCase__ :int="29500" ): '''simple docstring''' a = False a = False if any(key.startswith("KAGGLE" ) for key in os.environ.keys() ): a = True elif "IPython" in sys.modules: a = "google.colab" in str(sys.modules["IPython"].get_ipython() ) try: a = PrecisionType(mixed_precision.lower() ) except ValueError: raise ValueError( F"""Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.""" ) if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME" , __A ) is not None): # TPU launch import torch_xla.distributed.xla_multiprocessing as xmp if len(AcceleratorState._shared_state ) > 0: raise ValueError( "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside " "your training function. Restart your notebook and make sure no cells initializes an " "`Accelerator`." ) if num_processes is None: a = 8 a = PrepareForLaunch(__A , distributed_type="TPU" ) print(F"""Launching a training on {num_processes} TPU cores.""" ) xmp.spawn(__A , args=__A , nprocs=__A , start_method="fork" ) elif in_colab: # No need for a distributed launch otherwise as it's either CPU or one GPU. if torch.cuda.is_available(): print("Launching training on one GPU." ) else: print("Launching training on one CPU." ) function(*__A ) else: if num_processes is None: raise ValueError( "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." 
) if num_processes > 1: # Multi-GPU launch from torch.multiprocessing import start_processes from torch.multiprocessing.spawn import ProcessRaisedException if len(AcceleratorState._shared_state ) > 0: raise ValueError( "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized " "inside your training function. Restart your notebook and make sure no cells initializes an " "`Accelerator`." ) if torch.cuda.is_initialized(): raise ValueError( "To launch a multi-GPU training from your notebook, you need to avoid running any instruction " "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA " "function." ) # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=__A , master_addr="127.0.01" , master_port=__A , mixed_precision=__A ): a = PrepareForLaunch(__A , distributed_type="MULTI_GPU" ) print(F"""Launching training on {num_processes} GPUs.""" ) try: start_processes(__A , args=__A , nprocs=__A , start_method="fork" ) except ProcessRaisedException as e: if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]: raise RuntimeError( "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. " "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. " "Please review your imports and test them when running the `notebook_launcher()` to identify " "which one is problematic." ) from e else: # No need for a distributed launch otherwise as it's either CPU, GPU or MPS. if is_mps_available(): a = "1" print("Launching training on MPS." ) elif torch.cuda.is_available(): print("Launching training on one GPU." ) else: print("Launching training on CPU." 
) function(*__A ) def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[int] , UpperCAmelCase__ :Dict=() , UpperCAmelCase__ :int=2 ): '''simple docstring''' from torch.multiprocessing import start_processes with tempfile.NamedTemporaryFile() as tmp_file: # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=__A , master_addr="127.0.01" , master_port="29500" , accelerate_mixed_precision="no" , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu="yes" , ): a = PrepareForLaunch(__A , debug=__A ) start_processes(__A , args=__A , nprocs=__A , start_method="fork" )
701
from ...configuration_utils import PretrainedConfig from ...utils import logging A_ : Any = logging.get_logger(__name__) A_ : Optional[int] = { '''SCUT-DLVCLab/lilt-roberta-en-base''': ( '''https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json''' ), } class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = '''lilt''' def __init__( self : Union[str, Any] , __lowerCAmelCase : Optional[Any]=3_0522 , __lowerCAmelCase : str=768 , __lowerCAmelCase : Any=12 , __lowerCAmelCase : Optional[Any]=12 , __lowerCAmelCase : List[Any]=3072 , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : int=0.0_2 , __lowerCAmelCase : Union[str, Any]=1E-12 , __lowerCAmelCase : Tuple=0 , __lowerCAmelCase : List[Any]="absolute" , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : Dict=1024 , **__lowerCAmelCase : Dict , ) -> int: """simple docstring""" super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase ) a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = hidden_act a = intermediate_size a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = initializer_range a = layer_norm_eps a = position_embedding_type a = classifier_dropout a = channel_shrink_ratio a = max_ad_position_embeddings
32
0
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, Features, Value from .base import TaskTemplate @dataclass(frozen=snake_case__ ) class _lowercase ( snake_case__ ): _UpperCAmelCase = field(default='''automatic-speech-recognition''', metadata={'''include_in_asdict_even_if_is_default''': True} ) _UpperCAmelCase = Features({'''audio''': Audio()} ) _UpperCAmelCase = Features({'''transcription''': Value('''string''' )} ) _UpperCAmelCase = "audio" _UpperCAmelCase = "transcription" def A ( self : List[Any] , __lowerCAmelCase : Any ) -> int: """simple docstring""" if self.audio_column not in features: raise ValueError(f"""Column {self.audio_column} is not present in features.""" ) if not isinstance(features[self.audio_column] , UpperCAmelCase_ ): raise ValueError(f"""Column {self.audio_column} is not an Audio type.""" ) a = copy.deepcopy(self ) a = self.input_schema.copy() a = features[self.audio_column] a = input_schema return task_template @property def A ( self : List[str] ) -> Dict[str, str]: """simple docstring""" return {self.audio_column: "audio", self.transcription_column: "transcription"}
702
import argparse from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta from transformers.utils import logging logging.set_verbosity_info() def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[int] , UpperCAmelCase__ :List[str] , UpperCAmelCase__ :Any ): '''simple docstring''' a = TaConfig.from_json_file(UpperCAmelCase__ ) print(F"""Building PyTorch model from configuration: {config}""" ) a = TaForConditionalGeneration(UpperCAmelCase__ ) # Load weights from tf checkpoint load_tf_weights_in_ta(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # Save pytorch-model print(F"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(UpperCAmelCase__ ) if __name__ == "__main__": A_ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) A_ : Tuple = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
32
0
from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging A_ : Optional[int] = logging.get_logger(__name__) class _lowercase ( _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = ["audio_values", "audio_mask"] def __init__( self : Tuple , __lowerCAmelCase : Any=2048 , __lowerCAmelCase : Optional[int]=1 , __lowerCAmelCase : List[str]=[16, 16] , __lowerCAmelCase : Dict=128 , __lowerCAmelCase : Any=4_4100 , __lowerCAmelCase : Dict=86 , __lowerCAmelCase : Optional[Any]=2048 , __lowerCAmelCase : Tuple=0.0 , **__lowerCAmelCase : Optional[Any] , ) -> List[str]: """simple docstring""" super().__init__( feature_size=A_ , sampling_rate=A_ , padding_value=A_ , **A_ , ) a = spectrogram_length a = num_channels a = patch_size a = feature_size // self.patch_size[1] a = n_fft a = sampling_rate // hop_length_to_sampling_rate a = sampling_rate a = padding_value a = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=A_ , min_frequency=0.0 , max_frequency=2_2050.0 , sampling_rate=A_ , norm="slaney" , mel_scale="slaney" , ).T def A ( self : Optional[Any] , __lowerCAmelCase : Tuple ) -> np.ndarray: """simple docstring""" a = spectrogram( A_ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=8_0.0 , ) a = log_spec[:, :-1] a = log_spec - 2_0.0 a = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Any = None , __lowerCAmelCase : Optional[Any] = True , __lowerCAmelCase : Dict = None , __lowerCAmelCase : Union[str, Any] = False , __lowerCAmelCase : List[str] = False , **__lowerCAmelCase : List[str] , ) -> BatchFeature: """simple docstring""" if sampling_rate is not 
None: if sampling_rate != self.sampling_rate: raise ValueError( "This feature extractor is set to support sampling rate" f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled""" f""" with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) a = isinstance(A_ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) a = is_batched_numpy or ( isinstance(A_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: a = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(A_ , np.ndarray ): a = np.asarray(A_ , dtype=np.floataa ) elif isinstance(A_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): a = raw_speech.astype(np.floataa ) # always return batch if not is_batched: a = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis a = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , A_ ): a = [np.asarray(A_ , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask a = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: a = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] a = np.array(A_ ).astype(np.floataa ) # convert into correct format for padding a = max_patch_len // self.freq_len * 
self.patch_size[0] # The maximum audio size in a batch a = np.ones([len(A_ ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) a = padded_audio_features * self.padding_value for i in range(len(A_ ) ): a = audio_features[i] a = feature # return as BatchFeature if return_attention_mask: a = {"audio_values": padded_audio_features, "audio_mask": audio_mask} else: a = {"audio_values": padded_audio_features} a = BatchFeature(data=A_ , tensor_type=A_ ) return encoded_inputs
703
def UpperCAmelCase__ ( UpperCAmelCase__ :int , UpperCAmelCase__ :int ): '''simple docstring''' if a < 0 or b < 0: raise ValueError("the value of both inputs must be positive" ) a = str(bin(UpperCAmelCase__ ) )[2:] # remove the leading "0b" a = str(bin(UpperCAmelCase__ ) )[2:] # remove the leading "0b" a = max(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) ) return "0b" + "".join( str(int(char_a == "1" and char_b == "1" ) ) for char_a, char_b in zip(a_binary.zfill(UpperCAmelCase__ ) , b_binary.zfill(UpperCAmelCase__ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
32
0
import collections import json import os import re from typing import TYPE_CHECKING, List, Optional, Tuple import numpy as np from ...tokenization_utils_fast import PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation A_ : Optional[Any] = logging.get_logger(__name__) A_ : int = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''} A_ : Tuple = { '''vocab_file''': { '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''', }, '''emoji_file''': { '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''', }, } A_ : Optional[Any] = { '''abeja/gpt-neox-japanese-2.7b''': 20_48, } def UpperCAmelCase__ ( UpperCAmelCase__ :str , UpperCAmelCase__ :Optional[int] ): '''simple docstring''' with open(_lowercase , "r" , encoding="utf-8" ) as f: a = json.loads(f.read() ) a = collections.OrderedDict() a = collections.OrderedDict() a = collections.OrderedDict() with open(_lowercase , "r" , encoding="utf-8" ) as f: a = f.readlines() a = [[t.rstrip("\n" )] if (t == ',' or ',' not in t) else t.rstrip("\n" ).split("," ) for t in token] for idx, b in enumerate(_lowercase ): a = b a = idx for wd in b: a = idx return vocab, raw_vocab, ids_to_tokens, emoji class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = VOCAB_FILES_NAMES _UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase = ['''input_ids''', '''attention_mask'''] def __init__( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any]="<|endoftext|>" , __lowerCAmelCase : Union[str, Any]="<|endoftext|>" , __lowerCAmelCase : Dict="<|startoftext|>" , __lowerCAmelCase : str="<|endoftext|>" , __lowerCAmelCase : int=False , **__lowerCAmelCase : str , ) -> Tuple: """simple docstring""" super().__init__( 
unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , do_clean_text=UpperCamelCase_ , **UpperCamelCase_ , ) if not os.path.isfile(UpperCamelCase_ ): raise ValueError( f"""Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained""" " model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) if not os.path.isfile(UpperCamelCase_ ): raise ValueError( f"""Can\'t find a emoji file at path \'{emoji_file}\'. To load the emoji information from a Google""" " pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) a = do_clean_text a = load_vocab_and_emoji(UpperCamelCase_ , UpperCamelCase_ ) a = SubWordJapaneseTokenizer( vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji ) @property def A ( self : Any ) -> int: """simple docstring""" return len(self.raw_vocab ) def A ( self : int ) -> Union[str, Any]: """simple docstring""" return dict(self.raw_vocab , **self.added_tokens_encoder ) def A ( self : Dict , __lowerCAmelCase : Optional[Any] ) -> Any: """simple docstring""" return self.subword_tokenizer.tokenize(UpperCamelCase_ , clean=self.do_clean_text ) def A ( self : List[str] , __lowerCAmelCase : Any ) -> str: """simple docstring""" return self.vocab.get(UpperCamelCase_ , self.vocab.get(self.unk_token ) ) def A ( self : int , __lowerCAmelCase : Tuple ) -> Union[str, Any]: """simple docstring""" return self.subword_tokenizer.convert_id_to_token(UpperCamelCase_ ) def A ( self : Any , __lowerCAmelCase : List[Any] ) -> str: """simple docstring""" a = ''.join(UpperCamelCase_ ).strip() return out_string def A ( self : Optional[Any] , __lowerCAmelCase : "Conversation" ) -> List[int]: """simple docstring""" a = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) + [self.eos_token_id] ) if 
len(UpperCamelCase_ ) > self.model_max_length: a = input_ids[-self.model_max_length :] return input_ids def A ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: """simple docstring""" a = 0 if os.path.isdir(UpperCamelCase_ ): a = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) a = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] ) else: a = ( (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file'] ) a = ( (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file'] ) with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as writer: for token_index, token in self.ids_to_tokens.items(): if index != token_index: logger.warning( f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" " Please check that the vocabulary is not corrupted!" 
) a = token_index writer.write(",".join(UpperCamelCase_ ) + "\n" ) index += 1 with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as writer: json.dump(self.emoji , UpperCamelCase_ ) return vocab_file, emoji_file class _lowercase ( UpperCAmelCase__ ): def __init__( self : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Tuple ) -> Any: """simple docstring""" a = vocab # same as swe a = ids_to_tokens # same as bpe a = emoji a = np.max([len(UpperCamelCase_ ) for w in self.vocab.keys()] ) a = re.compile(R"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" ) a = re.compile(R"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" ) a = re.compile(R"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" ) a = re.compile( R"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" ) a = re.compile( R"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" ) a = re.compile( R"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" ) a = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿' a = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟' a = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} ) def __len__( self : str ) -> str: """simple docstring""" return len(self.ids_to_tokens ) def A ( self : List[Any] , __lowerCAmelCase : List[Any] ) -> List[str]: """simple docstring""" a = self.content_repattera.sub("<URL>" , UpperCamelCase_ ) a = self.content_repattera.sub("<EMAIL>" , UpperCamelCase_ ) a = self.content_repattera.sub("<TEL>" , UpperCamelCase_ ) a = 
self.content_repattera.sub("<DATE>" , UpperCamelCase_ ) a = self.content_repattera.sub("<DATE>" , UpperCamelCase_ ) a = self.content_repattera.sub("<PRICE>" , UpperCamelCase_ ) a = content.translate(self.content_transa ) while "<BLOCK><BLOCK>" in content: a = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" ) return content def A ( self : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any=False ) -> List[str]: """simple docstring""" a = text.replace(" " , "<SP>" ) a = text.replace(" " , "<SP>" ) a = text.replace("\r\n" , "<BR>" ) a = text.replace("\n" , "<BR>" ) a = text.replace("\r" , "<BR>" ) a = text.replace("\t" , "<TAB>" ) a = text.replace("—" , "ー" ) a = text.replace("−" , "ー" ) for k, v in self.emoji["emoji"].items(): if k in text: a = text.replace(UpperCamelCase_ , UpperCamelCase_ ) if clean: a = self.clean_text(UpperCamelCase_ ) def check_simbol(__lowerCAmelCase : Dict ): a = x.encode() if len(UpperCamelCase_ ) == 1 and len(UpperCamelCase_ ) == 2: a = (int(e[0] ) << 8) + int(e[1] ) if ( (c >= 0Xc_2a1 and c <= 0Xc_2bf) or (c >= 0Xc_780 and c <= 0Xc_783) or (c >= 0Xc_ab9 and c <= 0Xc_bbf) or (c >= 0Xc_c80 and c <= 0Xc_da2) ): return True return False def checkuae(__lowerCAmelCase : Any ): a = x.encode() if len(UpperCamelCase_ ) == 1 and len(UpperCamelCase_ ) == 3: a = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] ) if c >= 0Xe28_080 and c <= 0Xe2b_07f: return True return False a = 0 a = [] while pos < len(UpperCamelCase_ ): a = min(len(UpperCamelCase_ ) , pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3 a = [] # (token_id, token, pos) for e in range(UpperCamelCase_ , UpperCamelCase_ , -1 ): a = text[pos:e] if wd in self.vocab: if wd[0] == "<" and len(UpperCamelCase_ ) > 2: a = [(self.vocab[wd], wd, e)] break else: candidates.append((self.vocab[wd], wd, e) ) if len(UpperCamelCase_ ) > 0: # the smallest token_id is adopted a = sorted(UpperCamelCase_ , key=lambda __lowerCAmelCase : x[0] )[0] result.append(UpperCamelCase_ ) a = e else: a = pos + 1 
a = text[pos:end] if check_simbol(UpperCamelCase_ ): result.append("<KIGOU>" ) elif checkuae(UpperCamelCase_ ): result.append("<U2000U2BFF>" ) else: for i in wd.encode("utf-8" ): result.append("<|byte%d|>" % i ) a = end return result def A ( self : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : List[str]="\n" ) -> List[str]: """simple docstring""" a = [] a = [] a = self.ids_to_tokens[index][0] if word[:6] == "<|byte" and word[-2:] == "|>": byte_tokens.append(int(word[6:-2] ) ) else: if len(UpperCamelCase_ ) > 0: words.append(bytearray(UpperCamelCase_ ).decode("utf-8" , errors="replace" ) ) a = [] if word[:7] == "<|emoji" and word[-2:] == "|>": words.append(self.emoji["emoji_inv"][word] ) elif word == "<SP>": words.append(" " ) elif word == "<BR>": words.append(UpperCamelCase_ ) elif word == "<TAB>": words.append("\t" ) elif word == "<BLOCK>": words.append("▀" ) elif word == "<KIGOU>": words.append("ǀ" ) elif word == "<U2000U2BFF>": words.append("‖" ) else: words.append(UpperCamelCase_ ) if len(UpperCamelCase_ ) > 0: words.append(bytearray(UpperCamelCase_ ).decode("utf-8" , errors="replace" ) ) a = ''.join(UpperCamelCase_ ) return text
704
from __future__ import annotations from collections.abc import Iterable, Iterator from dataclasses import dataclass A_ : List[str] = (3, 9, -11, 0, 7, 5, 1, -1) A_ : Optional[int] = (4, 6, 2, 0, 8, 10, 3, -2) @dataclass class _lowercase : _UpperCAmelCase = 42 _UpperCAmelCase = 42 class _lowercase : def __init__( self : List[Any] , __lowerCAmelCase : Iterable[int] ) -> None: """simple docstring""" a = None for i in sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ): a = Node(__lowerCAmelCase , self.head ) def __iter__( self : Union[str, Any] ) -> Iterator[int]: """simple docstring""" a = self.head while node: yield node.data a = node.next_node def __len__( self : Tuple ) -> int: """simple docstring""" return sum(1 for _ in self ) def __str__( self : Union[str, Any] ) -> str: """simple docstring""" return " -> ".join([str(__lowerCAmelCase ) for node in self] ) def UpperCAmelCase__ ( UpperCAmelCase__ :SortedLinkedList , UpperCAmelCase__ :SortedLinkedList ): '''simple docstring''' return SortedLinkedList(list(UpperCAmelCase__ ) + list(UpperCAmelCase__ ) ) if __name__ == "__main__": import doctest doctest.testmod() A_ : Optional[Any] = SortedLinkedList print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
32
0
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A_ : Tuple = logging.get_logger(__name__) A_ : Optional[int] = { "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json", } class _lowercase ( lowercase__ ): _UpperCAmelCase = '''data2vec-text''' def __init__( self : Optional[Any] , __lowerCAmelCase : Dict=3_0522 , __lowerCAmelCase : Optional[int]=768 , __lowerCAmelCase : Union[str, Any]=12 , __lowerCAmelCase : Dict=12 , __lowerCAmelCase : str=3072 , __lowerCAmelCase : Optional[int]="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : List[Any]=512 , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Optional[Any]=0.0_2 , __lowerCAmelCase : Union[str, Any]=1E-12 , __lowerCAmelCase : Tuple=1 , __lowerCAmelCase : int=0 , __lowerCAmelCase : Any=2 , __lowerCAmelCase : List[str]="absolute" , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Optional[int]=None , **__lowerCAmelCase : Optional[Any] , ) -> List[str]: """simple docstring""" super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase ) a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = hidden_act a = intermediate_size a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = initializer_range a = layer_norm_eps a = position_embedding_type a = use_cache a = classifier_dropout class _lowercase ( lowercase__ ): @property def A ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" if self.task == "multiple-choice": a = {0: "batch", 1: "choice", 2: "sequence"} else: a = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
705
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
32
0
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union

import torch

from .tensor_utils import tensor_tree_map, tree_map


def _fetch_dims(tree):
    """Recursively collect the shapes of every tensor leaf in a nested
    dict/list/tuple structure. Raises ValueError for unsupported leaf types."""
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")
    return shapes


@torch.jit.ignore
def _flat_idx_to_idx(flat_idx, dims):
    """Convert a flat index into a tuple of per-dimension indices for `dims`."""
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))


@torch.jit.ignore
def _get_minimal_slice_set(start, end, dims, start_edges=None, end_edges=None):
    """Return a minimal set of slice tuples covering the flat range
    [start, end] (inclusive at both ends) of a tensor with batch dims `dims`.

    start_edges/end_edges mark, per dimension, whether the start/end index sits
    on the edge of that dimension, which allows whole subtrees to be grabbed
    with a single slice.
    """

    def reduce_edge_list(l) -> None:
        # An index only counts as "on the edge" if every faster-varying
        # dimension after it is also on the edge.
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices = []
    path_list = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path = tuple(path_list)
    divergence_idx = len(path)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper():
        """Slices covering everything from `start` to the top of its subtree."""
        assert start_edges is not None
        assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower():
        """Slices covering everything from the bottom of its subtree to `end`."""
        assert start_edges is not None
        assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices


@torch.jit.ignore
def _chunk_slice(t, flat_start, flat_end, no_batch_dims):
    """Equivalent to t.reshape((-1,) + t.shape[no_batch_dims:])[flat_start:flat_end],
    but without materializing the full flattened tensor."""
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])


def chunk_layer(
    layer,
    inputs,
    chunk_size,
    no_batch_dims,
    low_mem=False,
    _out=None,
    _add_into_out=False,
):
    """Apply `layer` to `inputs` in chunks along the (flattened) leading
    `no_batch_dims` batch dimensions, reassembling the output at the end.

    Args:
        layer: callable invoked as layer(**chunk).
        inputs: (nested) dict of tensors sharing broadcastable batch dims.
        chunk_size: number of flat batch elements per chunk.
        no_batch_dims: how many leading dims count as batch dims.
        low_mem: avoid materializing expanded/flattened inputs up front.
        _out: optional pre-allocated output tree to write into.
        _add_into_out: accumulate (+=) into `_out` instead of assigning.

    Returns:
        The layer output with the original batch dimensions restored.
    """
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t):
        # Broadcast every input to the common batch shape, then flatten the
        # batch dims (flattening is deferred in low-mem mode).
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    num_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t):
        # Size-1 leading dims were not expanded in low_mem mode; pass through.
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(num_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1, d2) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out


class ChunkSizeTuner:
    """Binary-searches the largest chunk size (power of two up to
    `max_chunk_size`) that runs without a RuntimeError (e.g. OOM), caching the
    result for argument trees of identical shapes/values."""

    def __init__(self, max_chunk_size=512):
        self.max_chunk_size = max_chunk_size
        # Cached tuning result and the argument signature it was computed for.
        self.cached_chunk_size = None
        self.cached_arg_data = None

    def _determine_favorable_chunk_size(self, fn, args, min_chunk_size):
        """Find the largest viable chunk size for fn(*args, chunk_size=...)."""
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1, ac2):
        """Return True iff the two cached argument trees are elementwise equal."""
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                # Compare dict values in a deterministic (key-sorted) order.
                # The original lambda referenced an undefined name `x`.
                v1 = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                v2 = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(v1, v2)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(self, representative_fn, args, min_chunk_size):
        """Return a viable chunk size, re-tuning only when the argument
        shapes/values differ from the cached signature."""
        consistent = True
        # Replace tensors by their shapes so the cache key is cheap to compare.
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can't reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
706
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a FocalNet model/backbone.

    Defaults match `microsoft/focalnet-tiny`. The original block declared every
    __init__ parameter with the same name (a SyntaxError); the parameters are
    restored to the names the body assigns to attributes.
    """

    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        # NOTE: list defaults are kept for interface compatibility; they are
        # only read, never mutated, below.
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        # Stage names drive the backbone feature-selection helper below.
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
32
0
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed

import numpy as np

import datasets

from .execute import check_correctness


# NOTE: the original named every constant `A_` and referenced _DESCRIPTION /
# _KWARGS_DESCRIPTION / _WARNING / _LICENSE / check_correctness /
# estimate_pass_at_k by names that did not exist; restored here.
_CITATION = "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n"

_DESCRIPTION = "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n"

_KWARGS_DESCRIPTION = "\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {'pass@1': 0.5, 'pass@2': 1.0}\n"

_WARNING = "\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n"

_LICENSE = "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE."


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    """Evaluates generated code candidates against reference tests (pass@k).

    WARNING: executes untrusted model-generated code; gated behind the
    HF_ALLOW_CODE_EVAL environment variable.
    """

    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Run every candidate against its reference test and compute pass@k.

        Returns a (pass_at_k, results) pair: aggregate pass rates for each k,
        plus the granular per-task execution results.
        """
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            # Explicit opt-in required because candidate code is executed.
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + '\n' + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]['passed'] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"""pass@{k}""": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results


def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimate pass@k for each problem; returns a numpy array of estimates."""

    def estimator(n: int, c: int, k: int) -> float:
        # Unbiased estimator: 1 - C(n - c, k) / C(n, k), computed stably as a
        # running product to avoid huge binomial coefficients.
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
707
def is_palindrome(head):
    """Check whether a singly linked list is a palindrome.

    Reverses the second half in place (O(1) extra space), then compares the
    two halves. An empty list counts as a palindrome.
    """
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    """Palindrome check using a stack of the second half's values (O(n) space)."""
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison: popping yields the second half reversed.
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True


def is_palindrome_dict(head):
    """Palindrome check via a value -> positions mapping.

    For a palindrome, each value's positions must pair up symmetrically around
    the middle (pos_i + pos_mirror == length - 1), with at most one value
    having an odd count (the middle element).
    """
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
32
0
"""Tests for the byte-level ByT5 tokenizer (which has no vocabulary file)."""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple

from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


# Pick the tensor framework for `return_tensors=` based on what is installed.
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    # NOTE: in the original every test method was named `A`, so only the last
    # one survived class creation and the suite silently ran almost nothing.
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        """Build a (text, ids) pair of cleanly-decodable ASCII tokens."""
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        # Keep only tokens that are plain letters/spaces and round-trip 1:1.
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + ' '
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = ' ' + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_eos_treatment(self):
        # Explicit "</s>" in the text must encode the same as the auto-added EOS.
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")

    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)

        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}"""):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}"""):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}"""):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"""<extra_id_{i}>""" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                tokenizer = tokenizer_class.from_pretrained(tmp_dir)
                # A lone continuation byte cannot be decoded to text.
                self.assertTrue(tokenizer.decode([255]) == "")

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}"""):
                tokens = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)

    def test_tokenizers_common_ids_setters(self):
        # Re-implemented here because ByT5 has no vocab against which the
        # common mixin version validates the test tokens.
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}"""):
                attributes_list = [
                    'bos_token',
                    'eos_token',
                    'unk_token',
                    'sep_token',
                    'pad_token',
                    'cls_token',
                    'mask_token',
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
708
import unittest from transformers import MobileBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) class _lowercase : def __init__( self : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : int=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : str=99 , __lowerCAmelCase : List[str]=64 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Dict=5 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Optional[Any]=37 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Union[str, Any]=2 , __lowerCAmelCase : Optional[Any]=0.0_2 , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : Optional[int]=4 , __lowerCAmelCase : Union[str, Any]=None , ) -> List[str]: """simple docstring""" a = parent a = batch_size a = seq_length a = is_training a = use_input_mask a = use_token_type_ids a = use_labels a = vocab_size a = hidden_size a = embedding_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings 
a = type_vocab_size a = type_sequence_label_size a = initializer_range a = num_labels a = num_choices a = scope def A ( self : Optional[int] ) -> Optional[int]: """simple docstring""" a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a = None if self.use_input_mask: a = random_attention_mask([self.batch_size, self.seq_length] ) a = None if self.use_token_type_ids: a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a = None a = None a = None if self.use_labels: a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a = ids_tensor([self.batch_size] , self.num_choices ) a = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A ( self : int ) -> List[str]: """simple docstring""" return MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , ) def A ( self : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict ) -> Union[str, Any]: """simple docstring""" a = MobileBertModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) a = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) a = model(__lowerCAmelCase ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def A ( self : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Any ) -> str: """simple docstring""" a = MobileBertForMaskedLM(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] ) -> List[str]: """simple docstring""" a = MobileBertForNextSentencePrediction(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def A ( self : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ) -> List[Any]: """simple docstring""" a = MobileBertForPreTraining(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , next_sentence_label=__lowerCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, 
self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def A ( self : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ) -> Any: """simple docstring""" a = MobileBertForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A ( self : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] ) -> Optional[int]: """simple docstring""" a = self.num_labels a = MobileBertForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ) -> Optional[Any]: """simple docstring""" a = self.num_labels a = MobileBertForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , 
(self.batch_size, self.seq_length, self.num_labels) ) def A ( self : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] ) -> List[str]: """simple docstring""" a = self.num_choices a = MobileBertForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A ( self : List[Any] ) -> Dict: """simple docstring""" a = self.prepare_config_and_inputs() ( ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ) = config_and_inputs a = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ): _UpperCAmelCase = ( ( MobileBertModel, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, ) if is_torch_available() else () ) _UpperCAmelCase = ( { '''feature-extraction''': MobileBertModel, '''fill-mask''': MobileBertForMaskedLM, '''question-answering''': MobileBertForQuestionAnswering, '''text-classification''': MobileBertForSequenceClassification, '''token-classification''': MobileBertForTokenClassification, '''zero-shot''': MobileBertForSequenceClassification, } if is_torch_available() else {} ) _UpperCAmelCase = True def A ( self : 
Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any=False ) -> Any: """simple docstring""" a = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) if return_labels: if model_class in get_values(__lowerCAmelCase ): a = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCAmelCase ) a = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase ) return inputs_dict def A ( self : Optional[int] ) -> List[Any]: """simple docstring""" a = MobileBertModelTester(self ) a = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 ) def A ( self : int ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def A ( self : str ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*__lowerCAmelCase ) def A ( self : str ) -> str: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowerCAmelCase ) def A ( self : List[str] ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowerCAmelCase ) def A ( self : int ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowerCAmelCase ) def A ( self : List[Any] ) -> int: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowerCAmelCase ) def A ( self : List[Any] ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowerCAmelCase ) def A ( self : List[Any] ) -> Optional[int]: 
"""simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowerCAmelCase ) def A ( self : int ) -> Tuple: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowerCAmelCase ) def UpperCAmelCase__ ( UpperCAmelCase__ :Dict ): '''simple docstring''' return torch.tensor( UpperCAmelCase__ , dtype=torch.long , device=UpperCAmelCase__ , ) A_ : Dict = 1E-3 @require_torch @require_sentencepiece @require_tokenizers class _lowercase ( unittest.TestCase ): @slow def A ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" a = MobileBertModel.from_pretrained("google/mobilebert-uncased" ).to(__lowerCAmelCase ) a = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] ) with torch.no_grad(): a = model(__lowerCAmelCase )[0] a = torch.Size((1, 9, 512) ) self.assertEqual(output.shape , __lowerCAmelCase ) a = torch.tensor( [ [ [-2.4_73_65_26E07, 8.2_69_16_56E04, 1.6_52_18_38E05], [-5.7_54_17_04E-01, 3.9_05_60_22E00, 4.4_01_15_07E00], [2.6_04_73_59E00, 1.5_67_76_52E00, -1.7_32_41_88E-01], ] ] , device=__lowerCAmelCase , ) # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a # ~1 difference, it's therefore not a good idea to measure using addition. # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE a = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE ) a = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE ) self.assertTrue(lower_bound and upper_bound )
32
0
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    """Collapse numbered sub-module segments of a PyTorch parameter path.

    Flax addresses sub-modules as ``block_0`` where PyTorch uses ``block.0``,
    so every ``name.<digits>`` segment is rewritten to ``name_<digits>``.

    Args:
        key: dotted PyTorch state-dict key, e.g. ``"down.blocks.0.weight"``.

    Returns:
        The key with numbered segments joined by underscores,
        e.g. ``"down.blocks_0.weight"``.
    """
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename a single PyTorch weight key (as a tuple) and reshape its tensor for Flax.

    Handles the PT->Flax naming/layout differences: layer-norm ``weight``/``gamma``
    -> ``scale``, embeddings ``weight`` -> ``embedding``, conv kernels (OIHW ->
    HWIO transpose), linear kernels (transposed), and legacy ``gamma``/``beta``
    layer-norm names.

    Args:
        pt_tuple_key: PyTorch key split into a tuple of path components.
        pt_tensor: the corresponding numpy tensor.
        random_flax_state_dict: flattened randomly-initialised Flax params, used
            to disambiguate which rename applies.

    Returns:
        ``(flax_tuple_key, tensor)`` — possibly renamed key and possibly
        transposed tensor.
    """
    # layer norm: a "bias" that actually corresponds to a Flax "scale" param
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: OIHW -> HWIO
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: Flax kernels are transposed w.r.t. PyTorch weights
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    """Convert a PyTorch state dict into a nested Flax parameter dict.

    Args:
        pt_state_dict: mapping of dotted keys to PyTorch tensors.
        flax_model: a stateless Flax model exposing ``init_weights(rng)``.
        init_key: PRNG seed used to initialise the reference Flax params.

    Returns:
        Nested (unflattened) dict of ``jnp`` arrays matching the Flax layout.

    Raises:
        ValueError: if a converted tensor's shape disagrees with the randomly
            initialised Flax parameter of the same name.
    """
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
709
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class _lowercase(TestCase):
    """Static code-quality checks over every dataset script under ``./datasets``.

    Verifies that dataset scripts always pass an ``encoding`` to ``open(...)``
    and never use bare ``print(...)`` calls.
    """

    def _no_encoding_on_file_open(self, file_path: str):
        r"""Return a regex match for an ``open(...)`` call missing ``encoding=``.

        Binary / write modes (``rb``, ``w``, ``wb``, ``ab``, ...) are exempt via
        the negative lookahead. Returns ``None`` when the file is clean.
        """
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, file_path: str):
        r"""Return the first real ``print(`` call in *file_path*, or ``None``.

        Occurrences inside comments, string literals and docstrings are matched
        by non-capturing alternatives and then filtered out via ``group(1)``.
        """
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
        # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
        matches = regexp.finditer(input_text)
        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        """Every dataset script must pass utf-8 encoding to open(...)."""
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        """Dataset scripts must log via datasets.logger, never print()."""
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
32
0
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, TensorType

logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}


class _lowercase(PretrainedConfig):
    """Configuration class for an ImageGPT model.

    Stores the hyper-parameters of the architecture; instantiating with the
    defaults yields a configuration similar to ``openai/imagegpt-small``.
    """

    # identifier used by AutoConfig
    model_type = "imagegpt"
    # cached states that must not be compared at inference time
    keys_to_ignore_at_inference = ["past_key_values"]
    # map the common PretrainedConfig attribute names onto GPT-style names
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # 512 colour-cluster tokens + 1 start-of-sequence token
        n_positions=32 * 32,  # one position per pixel of a 32x32 image
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,  # None => inner dim chosen by the model (4 * n_embd convention)
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        """Build the config; unknown kwargs are forwarded to PretrainedConfig."""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    """ONNX export configuration for ImageGPT."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Declare the dynamic axes of the exported model's inputs."""
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework=None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        """Generate dummy pixel inputs and run them through *preprocessor*.

        Args:
            preprocessor: image processor that maps PIL images to model inputs.
            batch_size: number of dummy images to generate.
            seq_length: unused for image models (kept for interface parity).
            is_pair: unused for image models (kept for interface parity).
            framework: tensor framework passed as ``return_tensors``.
            num_channels / image_width / image_height: dummy image geometry.

        Returns:
            Dict of model inputs produced by the preprocessor.
        """
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
710
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure: maps sub-module name -> public names it provides.
_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

# The modeling sub-module is only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    # Direct imports for static type checkers only; at runtime the module is lazy.
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports sub-modules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
32
0
import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class _lowercase : def __init__( self : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any]=3 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : int=3 , __lowerCAmelCase : Optional[int]=10 , __lowerCAmelCase : str=[8, 16, 32, 64] , __lowerCAmelCase : Optional[Any]=[1, 1, 2, 1] , __lowerCAmelCase : Any=True , __lowerCAmelCase : str=True , __lowerCAmelCase : Union[str, Any]="relu" , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : Union[str, Any]=["stage2", "stage3", "stage4"] , __lowerCAmelCase : Tuple=[2, 3, 4] , __lowerCAmelCase : Any=1 , ) -> str: """simple docstring""" a = parent a = batch_size a = image_size a = num_channels a = embeddings_size a = hidden_sizes a = depths a = is_training a = use_labels a = hidden_act a = num_labels a = scope a = len(__A ) a = out_features a = out_indices a = num_groups def A ( self : int ) -> Optional[int]: """simple docstring""" a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a = None if self.use_labels: a = ids_tensor([self.batch_size] , self.num_labels ) a = self.get_config() return config, pixel_values, labels def A ( self : List[Any] ) -> int: 
"""simple docstring""" return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def A ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple ) -> List[Any]: """simple docstring""" a = BitModel(config=__A ) model.to(__A ) model.eval() a = model(__A ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def A ( self : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] ) -> List[str]: """simple docstring""" a = self.num_labels a = BitForImageClassification(__A ) model.to(__A ) model.eval() a = model(__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] ) -> List[str]: """simple docstring""" a = BitBackbone(config=__A ) model.to(__A ) model.eval() a = model(__A ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None a = None a = BitBackbone(config=__A ) model.to(__A ) model.eval() a = model(__A ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels 
self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def A ( self : List[str] ) -> List[str]: """simple docstring""" a = self.prepare_config_and_inputs() a = config_and_inputs a = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ): _UpperCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () _UpperCAmelCase = ( {'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification} if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def A ( self : Dict ) -> Dict: """simple docstring""" a = BitModelTester(self ) a = ConfigTester(self , config_class=__A , has_text_modality=__A ) def A ( self : Tuple ) -> Any: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A ( self : int ) -> str: """simple docstring""" return @unittest.skip(reason="Bit does not output attentions" ) def A ( self : List[Any] ) -> Optional[int]: """simple docstring""" pass @unittest.skip(reason="Bit does not use inputs_embeds" ) def A ( self : Tuple ) -> int: """simple docstring""" pass @unittest.skip(reason="Bit does not support input and output embeddings" ) def A ( self : Optional[int] ) -> Optional[int]: """simple docstring""" pass def A ( self : List[str] ) -> Optional[int]: """simple docstring""" a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: 
a = model_class(__A ) a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a = [*signature.parameters.keys()] a = ["pixel_values"] self.assertListEqual(arg_names[:1] , __A ) def A ( self : List[str] ) -> Tuple: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def A ( self : Union[str, Any] ) -> int: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__A ) def A ( self : List[Any] ) -> Optional[int]: """simple docstring""" a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = model_class(config=__A ) for name, module in model.named_modules(): if isinstance(__A , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) def A ( self : Dict ) -> Optional[int]: """simple docstring""" def check_hidden_states_output(__lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] ): a = model_class(__A ) model.to(__A ) model.eval() with torch.no_grad(): a = model(**self._prepare_for_class(__A , __A ) ) a = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states a = self.model_tester.num_stages self.assertEqual(len(__A ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) a = self.model_tester.prepare_config_and_inputs_for_common() a = ["preactivation", "bottleneck"] for model_class in self.all_model_classes: for layer_type in 
layers_type: a = layer_type a = True check_hidden_states_output(__A , __A , __A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] a = True check_hidden_states_output(__A , __A , __A ) @unittest.skip(reason="Bit does not use feedforward chunking" ) def A ( self : Optional[Any] ) -> int: """simple docstring""" pass def A ( self : Tuple ) -> List[Any]: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__A ) @slow def A ( self : Any ) -> Optional[Any]: """simple docstring""" for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a = BitModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def UpperCAmelCase__ ( ): '''simple docstring''' a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _lowercase ( unittest.TestCase ): @cached_property def A ( self : List[str] ) -> Dict: """simple docstring""" return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def A ( self : Dict ) -> str: """simple docstring""" a = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__A ) a = self.default_image_processor a = prepare_img() a = image_processor(images=__A , return_tensors="pt" ).to(__A ) # forward pass with torch.no_grad(): a = model(**__A ) # verify the logits a = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __A ) a = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]] ).to(__A ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __A , atol=1E-4 ) ) @require_torch class _lowercase ( UpperCAmelCase__, unittest.TestCase ): _UpperCAmelCase = (BitBackbone,) if is_torch_available() else () _UpperCAmelCase = BitConfig _UpperCAmelCase = False def A ( self : Optional[Any] ) -> str: """simple docstring""" a = BitModelTester(self )
711
import tempfile

import torch

from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    DPMSolverSinglestepScheduler,
    UniPCMultistepScheduler,
)

from .test_schedulers import SchedulerCommonTest


class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    """Unit tests for `UniPCMultistepScheduler`.

    NOTE(review): restored from an obfuscated version in which every local was
    bound to ``a`` (clobbering values later read by name, e.g. ``kwargs`` /
    ``config`` -> NameError), every test method was named ``A`` (so only the
    last definition survived) and parameter names were duplicated (a
    SyntaxError). Names below follow the canonical diffusers test file.
    """

    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        """Return a default scheduler config dict, overridable via kwargs."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """Save/reload the scheduler and check stepping is bit-identical."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Save/reload the scheduler and check a single step is unchanged."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        """Run a 10-step denoising loop and return the final sample."""
        if scheduler is None:
            # bug fix: the obfuscated original rebuilt a default scheduler even
            # when one was passed in, which defeated the `scheduler` argument
            # (and the whole point of `test_switch`).
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives
        # same results for defaults
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    sample = self.full_loop(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1014) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        # thresholding=True was collapsed to a parameter name by the obfuscator;
        # a half-precision loop must still produce a half-precision sample
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        # `torch.floataa` in the obfuscated source does not exist
        assert sample.dtype == torch.float16

    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
32
0
from typing import Callable, List, Optional, Union

import PIL
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPSegForImageSegmentation,
    CLIPSegProcessor,
    CLIPTextModel,
    CLIPTokenizer,
)

from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class TextInpainting(DiffusionPipeline):
    """Two-stage, text-driven inpainting pipeline.

    CLIPSeg segments the region described by ``text``; Stable Diffusion then
    inpaints that region according to ``prompt``.

    NOTE(review): restored from an obfuscated version with duplicated
    ``__lowerCAmelCase`` parameter names (a SyntaxError), locals collapsed to
    ``a`` and an undefined ``_a`` passed to ``deprecate``; the nonexistent
    ``UNetaDConditionModel`` import is fixed to ``UNet2DConditionModel``.
    """

    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            # patch the frozen config in place so downstream code sees offset 1
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        """Enable sliced attention computation to reduce peak memory."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        """Disable sliced attention (compute attention in one step)."""
        # setting `slice_size` to None restores full attention
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        """Offload submodules to CPU, moving each to GPU only when used."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """Device the pipeline executes on, accounting for accelerate hooks."""
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Segment the region described by ``text`` in ``image`` and inpaint
        it according to ``prompt``; all remaining arguments are forwarded to
        `StableDiffusionInpaintPipeline`."""
        # 1. compute a soft segmentation mask for `text` with CLIPSeg
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)

        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # 2. run the inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
712
import inspect
import unittest

from transformers import ConvNextV2Config
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import ConvNextV2Backbone, ConvNextV2ForImageClassification, ConvNextV2Model
    from transformers.models.convnextv2.modeling_convnextv2 import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class ConvNextV2ModelTester:
    """Builds tiny ConvNextV2 configs/inputs for the test cases below.

    NOTE(review): restored from an obfuscated version (``ConvNextVa*`` names do
    not exist in transformers; locals were collapsed to ``a`` and read under
    their real names, and all methods were named ``A``).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextV2Config(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict


@require_torch
class ConvNextV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as
    ConvNextV2 does not use input_ids/attention_mask.
    """

    all_model_classes = (
        (
            ConvNextV2Model,
            ConvNextV2ForImageClassification,
            ConvNextV2Backbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextV2Model, "image-classification": ConvNextV2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextV2Config, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # common config properties are exercised by the ConfigTester calls above
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            # base models and backbones have no loss head to train against
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
32
0
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}


class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    """Fast RoFormer tokenizer (backed by the `tokenizers` library) that uses
    a custom Jieba pre-tokenizer for Chinese text.

    NOTE(review): restored from an obfuscated version in which the module
    constants were all assigned to ``A_`` (while the class read the real
    names), the class attributes were all assigned to ``_UpperCAmelCase``
    (clobbering one another) and every method was named ``A``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            # rebuild the normalizer so it matches the requested options
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        # the custom Jieba pre-tokenizer is not picklable: swap in a plain
        # BertPreTokenizer for serialization
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # restore the custom Jieba pre-tokenizer after unpickling
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] — or, for a pair — [CLS] A [SEP] B [SEP]."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """0 for every token of the first sequence, 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        # the custom pre-tokenizer cannot be serialized into tokenizer.json:
        # replace it with a standard BertPreTokenizer before saving
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
713
import copy import os import cva import numpy as np from matplotlib import pyplot as plt class _lowercase : def __init__( self : List[str] ) -> List[str]: """simple docstring""" a = "" a = "" a = [] a = 0 a = 256 a = 0 a = 0 a = 0 a = 0 def A ( self : Optional[Any] , __lowerCAmelCase : Any ) -> int: """simple docstring""" a = cva.imread(__lowerCAmelCase , 0 ) a = copy.deepcopy(self.img ) a , a , a = plt.hist(self.img.ravel() , 256 , [0, 256] , label="x" ) a = np.sum(__lowerCAmelCase ) for i in range(len(__lowerCAmelCase ) ): a = x[i] / self.k self.sk += prk a = (self.L - 1) * self.sk if self.rem != 0: a = int(last % last ) a = int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(__lowerCAmelCase ) a = int(np.ma.count(self.img ) / self.img[1].size ) a = self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): a = self.img[j][i] if num != self.last_list[num]: a = self.last_list[num] cva.imwrite("output_data/output.jpg" , self.img ) def A ( self : Any ) -> int: """simple docstring""" plt.hist(self.img.ravel() , 256 , [0, 256] ) def A ( self : Any ) -> int: """simple docstring""" cva.imshow("Output-Image" , self.img ) cva.imshow("Input-Image" , self.original_image ) cva.waitKey(5000 ) cva.destroyAllWindows() if __name__ == "__main__": A_ : List[Any] = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''') A_ : int = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
32
0
from __future__ import annotations import matplotlib.pyplot as plt # type: ignore import numpy # initial triangle of Koch snowflake A_ : Dict = numpy.array([0, 0]) A_ : Optional[int] = numpy.array([0.5, 0.8660254]) A_ : List[Any] = numpy.array([1, 0]) A_ : Tuple = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1] def UpperCAmelCase__( UpperCAmelCase__ :List[Any] , UpperCAmelCase__ :Union[str, Any] ): '''simple docstring''' a = initial_vectors for _ in range(lowerCAmelCase__ ): a = iteration_step(lowerCAmelCase__ ) return vectors def UpperCAmelCase__( UpperCAmelCase__ :Optional[int] ): '''simple docstring''' a = [] for i, start_vector in enumerate(vectors[:-1] ): a = vectors[i + 1] new_vectors.append(lowerCAmelCase__ ) a = end_vector - start_vector new_vectors.append(start_vector + difference_vector / 3 ) new_vectors.append( start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) ) new_vectors.append(start_vector + difference_vector * 2 / 3 ) new_vectors.append(vectors[-1] ) return new_vectors def UpperCAmelCase__( UpperCAmelCase__ :List[str] , UpperCAmelCase__ :List[str] ): '''simple docstring''' a = numpy.radians(lowerCAmelCase__ ) a , a = numpy.cos(lowerCAmelCase__ ), numpy.sin(lowerCAmelCase__ ) a = numpy.array(((c, -s), (s, c)) ) return numpy.dot(lowerCAmelCase__ , lowerCAmelCase__ ) def UpperCAmelCase__( UpperCAmelCase__ :int ): '''simple docstring''' a = plt.gca() axes.set_aspect("equal" ) # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all # y-coordinates as inputs, which are constructed from the vector-list using # zip() a , a = zip(*lowerCAmelCase__ ) plt.plot(lowerCAmelCase__ , lowerCAmelCase__ ) plt.show() if __name__ == "__main__": import doctest doctest.testmod() A_ : List[Any] = iterate(INITIAL_VECTORS, 5) plot(processed_vectors)
714
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class _lowercase(DiffusionPipeline):
    """Unconditional image generation with a score-based SDE-VE model.

    Fixes restored here: the obfuscated block inherited from an undefined
    name (restored to the imported ``DiffusionPipeline``) and collapsed every
    local to ``a`` while the loop body read ``shape``/``model``/``sample``.
    """

    # Pipeline components.
    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNetaDModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Run the sampling loop and return generated images.

        Returns an ``ImagePipelineOutput`` (or a plain tuple when
        ``return_dict`` is False).
        """
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        # Use the (less noisy) mean of the final step as the output image.
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
32
0
from __future__ import annotations


class _lowercase:
    """Simple XOR cipher over strings / lists of characters.

    Fixes restored here: all six methods of the obfuscated block were named
    ``A`` (so earlier definitions were shadowed) and their bodies referenced
    the original parameter names (``content``, ``key``, ``file``) that the
    renamed signatures no longer bound.
    """

    def __init__(self, key: int = 0):
        # Fallback key used when a call passes key=0.
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        """XOR each character of ``content`` with ``key``; returns a char list."""
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        """Inverse of :meth:`encrypt` (XOR is its own inverse)."""
        assert isinstance(key, int) and isinstance(content, list)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        """XOR-encrypt ``content`` and return the result as a string."""
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        """Inverse of :meth:`encrypt_string`."""
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        """Encrypt ``file`` line by line into encrypt.out; False on I/O error."""
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        """Decrypt ``file`` line by line into decrypt.out; False on I/O error."""
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True


# Public alias matching the documented usage below.
XORCipher = _lowercase

# Tests
# crypt = XORCipher()
# key = 67

# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))

# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))

# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))

# if (crypt.encrypt_file("test.txt",key)):
#     print("encrypt successful")
# else:
#     print("encrypt unsuccessful")

# if (crypt.decrypt_file("encrypt.out",key)):
#     print("decrypt successful")
# else:
#     print("decrypt unsuccessful")
715
A_ : Any = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] A_ : Tuple = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] A_ : Optional[int] = { 0: '''Sunday''', 1: '''Monday''', 2: '''Tuesday''', 3: '''Wednesday''', 4: '''Thursday''', 5: '''Friday''', 6: '''Saturday''', } def UpperCAmelCase__ ( UpperCAmelCase__ :int , UpperCAmelCase__ :int , UpperCAmelCase__ :int ): '''simple docstring''' assert len(str(UpperCAmelCase__ ) ) > 2, "year should be in YYYY format" assert 1 <= month <= 12, "month should be between 1 to 12" assert 1 <= day <= 31, "day should be between 1 to 31" # Doomsday algorithm: a = year // 1_00 a = (5 * (century % 4) + 2) % 7 a = year % 1_00 a = centurian % 12 a = ( (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor ) % 7 a = ( DOOMSDAY_NOT_LEAP[month - 1] if (year % 4 != 0) or (centurian == 0 and (year % 4_00) == 0) else DOOMSDAY_LEAP[month - 1] ) a = (dooms_day + day - day_anchor) % 7 return WEEK_DAY_NAMES[week_day] if __name__ == "__main__": import doctest doctest.testmod()
32
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class _lowercase(PretrainedConfig):
    """Configuration class for Open-Llama models.

    Fixes restored here: the obfuscated ``__init__`` gave every parameter the
    same name (a SyntaxError) while the body referenced the real names, the
    base class was an undefined ``a__`` (restored to the imported
    ``PretrainedConfig``), every ``self.*`` assignment was collapsed to a
    dead local ``a``, and the validation method called as
    ``self._rope_scaling_validation()`` was named ``A``.
    """

    # model_type used by the AutoConfig machinery.
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # NOTE: the misspelled kwarg "use_memorry_efficient_attention" is kept
        # for backward compatibility with configs that used it.
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration dictionary."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
716
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple

import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask

import transformers
from transformers import (
    AutoConfig,
    AutoModelForTokenClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    """Fine-tune / evaluate a token-classification model on CoNLL-style data.

    Fixes restored here: both argument dataclasses were named ``_lowercase``
    while this function referenced ``ModelArguments``/``DataTrainingArguments``;
    dataclass fields were all named ``_UpperCAmelCase`` (shadowed); ``logger``
    and ``main`` were never bound under those names; and every local
    assignment had been collapsed to a dead ``a``.
    """
    # See all possible arguments in src/transformers/training_args.py, or by
    # passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fpaa,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        idalabel=label_map,
        labelaid={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        # Convert logits to per-token label names, skipping ignored positions.
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": fa_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fpaa else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results


def UpperCAmelCase__(index):
    # TPU multiprocessing entry point (used by xla_spawn.py); index is the
    # process ordinal and is unused.
    main()


if __name__ == "__main__":
    main()
32
0
from math import factorial def UpperCAmelCase__ ( UpperCAmelCase__ :Tuple = 1_00 ): return sum(map(_lowerCamelCase , str(factorial(_lowerCamelCase ) ) ) ) if __name__ == "__main__": print(solution(int(input('''Enter the Number: ''').strip())))
717
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class _lowercase(PretrainedConfig):
    """Configuration class for RWKV models.

    Fixes restored here: the obfuscated ``__init__`` gave every parameter the
    same name (a SyntaxError) while the body referenced the real names, both
    class attributes shared one name so the first (model_type) was shadowed,
    and every ``self.*`` assignment was collapsed to a dead local ``a``.
    """

    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Fall back to size-derived defaults when not given explicitly.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
32
0
import argparse
import json
import os
import re

import torch

from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging


logging.set_verbosity_info()

# Weight names that are averaged (not concatenated) across tensor-parallel ranks.
WEIGHTS_TO_AVERAGE_ENDSWITH = [
    "word_embeddings_layernorm.weight",
    "word_embeddings_layernorm.bias",
    "input_layernorm.weight",
    "input_layernorm.bias",
    "post_attention_layernorm.weight",
    "post_attention_layernorm.bias",
    "self_attention.dense.bias",
    "mlp.dense_4h_to_h.bias",
    "ln_f.weight",
    "ln_f.bias",
]

# RowParallelLinear weights in Megatron-DeepSpeed: concatenated along dim 1.
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
    "mlp.dense_4h_to_h.weight",
    "self_attention.dense.weight",
]


def layer_name_mapping(key, file):
    """Convert a Megatron-DeepSpeed TP/PP weight name to the transformers name."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks: layer index comes from the file name; the
    # first 3 "layer_*" files hold embeddings, hence the -3 offset.
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key


def get_dtype_size(dtype):
    """Return the size in bytes of one element of ``dtype``."""
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8


def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    """Merge Megatron-DeepSpeed TP shards into a transformers Bloom checkpoint.

    When ``shard_model`` is set, one output shard plus an index file is
    written per input pipeline file; otherwise everything is loaded into a
    single ``BloomModel`` and saved as one weights file.
    """
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None
        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for rank in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{rank}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))

        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bloom_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path to the Megatron-LM checkpoint path.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--bloom_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--shard_model",
        action="store_true",
        help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
    )
    parser.add_argument(
        "--pretraining_tp",
        default=4,
        type=int,
        help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
    )
    args = parser.parse_args()
    convert_bloom_checkpoint_to_pytorch(
        args.bloom_checkpoint_path,
        args.bloom_config_file,
        args.pytorch_dump_folder_path,
        args.shard_model,
        args.pretraining_tp,
    )
718
from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging A_ : List[str] = logging.get_logger(__name__) class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = ['''audio_values''', '''audio_mask'''] def __init__( self : List[Any] , __lowerCAmelCase : Dict=2048 , __lowerCAmelCase : List[Any]=1 , __lowerCAmelCase : Dict=[16, 16] , __lowerCAmelCase : str=128 , __lowerCAmelCase : Optional[int]=4_4100 , __lowerCAmelCase : int=86 , __lowerCAmelCase : Optional[Any]=2048 , __lowerCAmelCase : str=0.0 , **__lowerCAmelCase : Optional[int] , ) -> Union[str, Any]: """simple docstring""" super().__init__( feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , **__lowerCAmelCase , ) a = spectrogram_length a = num_channels a = patch_size a = feature_size // self.patch_size[1] a = n_fft a = sampling_rate // hop_length_to_sampling_rate a = sampling_rate a = padding_value a = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowerCAmelCase , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=__lowerCAmelCase , norm="slaney" , mel_scale="slaney" , ).T def A ( self : List[str] , __lowerCAmelCase : np.array ) -> np.ndarray: """simple docstring""" a = spectrogram( __lowerCAmelCase , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=8_0.0 , ) a = log_spec[:, :-1] a = log_spec - 2_0.0 a = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self : Union[str, Any] , __lowerCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : Optional[bool] = True , 
__lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , **__lowerCAmelCase : Optional[int] , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( "This feature extractor is set to support sampling rate" f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled""" f""" with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) a = isinstance(__lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) a = is_batched_numpy or ( isinstance(__lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: a = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray ): a = np.asarray(__lowerCAmelCase , dtype=np.floataa ) elif isinstance(__lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): a = raw_speech.astype(np.floataa ) # always return batch if not is_batched: a = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis a = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , __lowerCAmelCase ): a = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask a = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: a = [ 
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] a = np.array(__lowerCAmelCase ).astype(np.floataa ) # convert into correct format for padding a = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch a = np.ones([len(__lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) a = padded_audio_features * self.padding_value for i in range(len(__lowerCAmelCase ) ): a = audio_features[i] a = feature # return as BatchFeature if return_attention_mask: a = {"audio_values": padded_audio_features, "audio_mask": audio_mask} else: a = {"audio_values": padded_audio_features} a = BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase ) return encoded_inputs
32
0
# Install snippet injected as the first cell of auto-generated documentation notebooks.
A_ : List[Any] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''

# First notebook cell: the install snippet above, marked as executable code.
# NOTE(review): this references INSTALL_CONTENT, but the string above is bound to the
# mangled name `A_` — the original binding name appears to have been lost; confirm.
A_ : Union[str, Any] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]

# Placeholder -> concrete-class-name substitutions applied when rendering doc templates
# into runnable notebooks.
A_ : Union[str, Any] = {
    '''{processor_class}''': '''FakeProcessorClass''',
    '''{model_class}''': '''FakeModelClass''',
    '''{object_class}''': '''FakeObjectClass''',
}
719
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class _lowercase : def __init__( self : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : int=10 , __lowerCAmelCase : Any=3 , __lowerCAmelCase : Optional[int]=32 * 4 , __lowerCAmelCase : Dict=32 * 6 , __lowerCAmelCase : str=4 , __lowerCAmelCase : Dict=32 , ) -> Any: """simple docstring""" a = parent a = batch_size a = is_training a = use_auxiliary_loss a = num_queries a = num_channels a = min_size a = max_size a = num_labels a = mask_feature_size def A ( self : Union[str, Any] ) -> Dict: """simple docstring""" a = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __lowerCAmelCase ) a = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowerCAmelCase ) a = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowerCAmelCase ) > 0.5 ).float() a = (torch.rand((self.batch_size, self.num_labels) , device=__lowerCAmelCase ) > 0.5).long() a = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def A ( self : str ) -> Any: """simple docstring""" return 
MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def A ( self : Union[str, Any] ) -> Any: """simple docstring""" a , a , a , a , a = self.prepare_config_and_inputs() a = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def A ( self : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ) -> str: """simple docstring""" a = output.encoder_hidden_states a = output.pixel_decoder_hidden_states a = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowerCAmelCase ) , config.decoder_config.decoder_layers ) def A ( self : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str]=False ) -> Tuple: """simple docstring""" with torch.no_grad(): a = MaskFormerModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase ) a = model(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if 
output_hidden_states: self.check_output_hidden_state(__lowerCAmelCase , __lowerCAmelCase ) def A ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] ) -> Optional[int]: """simple docstring""" a = MaskFormerForInstanceSegmentation(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() def comm_check_on_output(__lowerCAmelCase : Tuple ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): a = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase ) a = model(__lowerCAmelCase ) comm_check_on_output(__lowerCAmelCase ) a = model( pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ) comm_check_on_output(__lowerCAmelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ): _UpperCAmelCase = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () _UpperCAmelCase = ( {'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase 
= False _UpperCAmelCase = False def A ( self : List[str] ) -> List[Any]: """simple docstring""" a = MaskFormerModelTester(self ) a = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase ) def A ( self : Any ) -> List[str]: """simple docstring""" self.config_tester.run_common_tests() def A ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" a , a = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) def A ( self : int ) -> int: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowerCAmelCase ) @unittest.skip(reason="MaskFormer does not use inputs_embeds" ) def A ( self : List[Any] ) -> Optional[Any]: """simple docstring""" pass @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" ) def A ( self : str ) -> Union[str, Any]: """simple docstring""" pass @unittest.skip(reason="MaskFormer is not a generative model" ) def A ( self : Tuple ) -> Optional[Any]: """simple docstring""" pass @unittest.skip(reason="MaskFormer does not use token embeddings" ) def A ( self : Tuple ) -> Optional[Any]: """simple docstring""" pass @require_torch_multi_gpu @unittest.skip( reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def A ( self : Optional[int] ) -> List[str]: """simple docstring""" pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." 
) def A ( self : List[str] ) -> Any: """simple docstring""" pass def A ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" a , a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = model_class(__lowerCAmelCase ) a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a = [*signature.parameters.keys()] a = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) @slow def A ( self : Tuple ) -> List[Any]: """simple docstring""" for model_name in ["facebook/maskformer-swin-small-coco"]: a = MaskFormerModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def A ( self : str ) -> Dict: """simple docstring""" a = (self.model_tester.min_size,) * 2 a = { "pixel_values": torch.randn((2, 3, *size) , device=__lowerCAmelCase ), "mask_labels": torch.randn((2, 10, *size) , device=__lowerCAmelCase ), "class_labels": torch.zeros(2 , 10 , device=__lowerCAmelCase ).long(), } a = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowerCAmelCase ) a = model(**__lowerCAmelCase ) self.assertTrue(outputs.loss is not None ) def A ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" a , a = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) def A ( self : List[str] ) -> Any: """simple docstring""" a , a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = model_class(__lowerCAmelCase ).to(__lowerCAmelCase ) a = model(**__lowerCAmelCase , output_attentions=__lowerCAmelCase ) self.assertTrue(outputs.attentions is not None ) def A ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss a = 
self.all_model_classes[1] a , a , a , a , a = self.model_tester.prepare_config_and_inputs() a = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() a = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ).loss loss.backward() def A ( self : List[str] ) -> Union[str, Any]: """simple docstring""" a = self.all_model_classes[1] a , a , a , a , a = self.model_tester.prepare_config_and_inputs() a = True a = True a = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() a = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ) a = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() a = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't a = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() a = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__lowerCAmelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) A_ : int = 1E-4 def UpperCAmelCase__ ( ): '''simple docstring''' a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_vision @slow class _lowercase ( unittest.TestCase ): @cached_property def A ( self : int ) -> Optional[int]: """simple docstring""" return ( MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" ) if is_vision_available() else None ) def A ( self : List[Any] ) -> Optional[Any]: """simple docstring""" a = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(__lowerCAmelCase ) a = self.default_image_processor a = prepare_img() a = image_processor(__lowerCAmelCase , return_tensors="pt" 
).to(__lowerCAmelCase ) a = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) ) with torch.no_grad(): a = model(**__lowerCAmelCase ) a = torch.tensor( [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) a = torch.tensor( [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) a = torch.tensor( [[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def A ( self : str ) -> Union[str, Any]: """simple docstring""" a = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" ) .to(__lowerCAmelCase ) .eval() ) a = self.default_image_processor a = prepare_img() a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase ) a = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) ) with torch.no_grad(): a = model(**__lowerCAmelCase ) # masks_queries_logits a = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) a = [ [-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, 
-1.9_3_6_4_2_3_3], [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5], [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2], ] a = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) # class_queries_logits a = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) a = torch.tensor( [ [1.65_12E00, -5.25_72E00, -3.35_19E00], [3.61_69E-02, -5.90_25E00, -2.93_13E00], [1.07_66E-04, -7.76_30E00, -5.12_63E00], ] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def A ( self : List[Any] ) -> Any: """simple docstring""" a = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" ) .to(__lowerCAmelCase ) .eval() ) a = self.default_image_processor a = prepare_img() a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase ) a = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) ) with torch.no_grad(): a = model(**__lowerCAmelCase ) # masks_queries_logits a = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) a = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]] a = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) # class_queries_logits a = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, 
model.config.num_labels + 1) ) a = torch.tensor( [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def A ( self : int ) -> Any: """simple docstring""" a = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" ) .to(__lowerCAmelCase ) .eval() ) a = self.default_image_processor a = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , ) a = inputs["pixel_values"].to(__lowerCAmelCase ) a = [el.to(__lowerCAmelCase ) for el in inputs["mask_labels"]] a = [el.to(__lowerCAmelCase ) for el in inputs["class_labels"]] with torch.no_grad(): a = model(**__lowerCAmelCase ) self.assertTrue(outputs.loss is not None )
32
0
from collections.abc import Callable

import numpy as np


def UpperCAmelCase__(
    ode_func: Callable,
    ya: float,
    xa: float,
    step_size: float,
    x_end: float,
) -> np.ndarray:
    """Solve an ODE y' = f(x, y) with the modified-Euler (Heun) method.

    Bug fix: the de-obfuscated original bound every local to ``a`` and declared
    five parameters with the same name, so ``n``, ``y`` and ``x`` were used but
    never defined. The names are reconstructed from their uses below.

    Args:
        ode_func: right-hand side f(x, y) of the ODE.
        ya: initial value y(xa).
        xa: initial abscissa.
        step_size: increment h between grid points (must be positive).
        x_end: final abscissa; integration covers [xa, x_end].

    Returns:
        numpy array of the n + 1 approximated y values, y[0] == ya.

    >>> def f(x, y):
    ...     return x
    >>> float(UpperCAmelCase__(f, 0.0, 0.0, 0.5, 1.0)[-1])
    0.5
    """
    # Number of steps needed to reach (at least) x_end.
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        # Predictor: one explicit Euler step.
        predictor = y[k] + step_size * ode_func(x, y[k])
        # Corrector: trapezoidal average of the slopes at both ends.
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, predictor))
        )
        x += step_size
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
720
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class _lowercase(unittest.TestCase):
    """Unit tests for ``DisjunctiveConstraint``.

    Bug fix: every test method was mangled to ``A`` (so later defs shadowed
    earlier ones and unittest discovered none of them), and tuple unpacks were
    collapsed to ``a , a , a`` while ``stepped``/``completed``/``reset`` were
    still read afterwards. Names are reconstructed from their uses.
    """

    def test_input_types(self):
        # token_ids must be a plain nested python list; tensors are rejected.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # One branch being a strict prefix of another is ambiguous and rejected.
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        # After reset the constraint can complete via a different branch.
        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
32
0
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu A_ : Optional[Any] = False class _lowercase ( unittest.TestCase ): def A ( self : Union[str, Any] ) -> Any: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() @property def A ( self : Any ) -> Any: """simple docstring""" return 12 @property def A ( self : Tuple ) -> Dict: """simple docstring""" return 12 @property def A ( self : int ) -> List[Any]: """simple docstring""" return 32 @property def A ( self : int ) -> Optional[Any]: """simple docstring""" torch.manual_seed(0 ) a = VQModel( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , ) return model @property def A ( self : Optional[Any] ) -> List[Any]: """simple docstring""" a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) return tokenizer @property def A ( self : Tuple ) -> int: """simple docstring""" torch.manual_seed(0 ) a = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModel(_lowercase ) @property def A ( self : Dict ) -> str: """simple docstring""" torch.manual_seed(0 ) a = 12 a = 12 a = { """attention_bias""": True, """cross_attention_dim""": 32, """attention_head_dim""": height * width, """num_attention_heads""": 1, 
"""num_vector_embeds""": self.num_embed, """num_embeds_ada_norm""": self.num_embeds_ada_norm, """norm_num_groups""": 32, """sample_size""": width, """activation_fn""": """geglu-approximate""", } a = TransformeraDModel(**_lowercase ) return model def A ( self : Any ) -> Optional[int]: """simple docstring""" a = """cpu""" a = self.dummy_vqvae a = self.dummy_text_encoder a = self.dummy_tokenizer a = self.dummy_transformer a = VQDiffusionScheduler(self.num_embed ) a = LearnedClassifierFreeSamplingEmbeddings(learnable=_lowercase ) a = VQDiffusionPipeline( vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , ) a = pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) a = """teddy bear playing in the pool""" a = torch.Generator(device=_lowercase ).manual_seed(0 ) a = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="np" ) a = output.images a = torch.Generator(device=_lowercase ).manual_seed(0 ) a = pipe( [prompt] , generator=_lowercase , output_type="np" , return_dict=_lowercase , num_inference_steps=2 )[0] a = image[0, -3:, -3:, -1] a = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) a = np.array([0.6_5_5_1, 0.6_1_6_8, 0.5_0_0_8, 0.5_6_7_6, 0.5_6_5_9, 0.4_2_9_5, 0.6_0_7_3, 0.5_5_9_9, 0.4_9_9_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def A ( self : Optional[Any] ) -> str: """simple docstring""" a = """cpu""" a = self.dummy_vqvae a = self.dummy_text_encoder a = self.dummy_tokenizer a = self.dummy_transformer a = VQDiffusionScheduler(self.num_embed ) a = LearnedClassifierFreeSamplingEmbeddings( learnable=_lowercase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length ) a = VQDiffusionPipeline( vqvae=_lowercase , text_encoder=_lowercase , 
tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , ) a = pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) a = """teddy bear playing in the pool""" a = torch.Generator(device=_lowercase ).manual_seed(0 ) a = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="np" ) a = output.images a = torch.Generator(device=_lowercase ).manual_seed(0 ) a = pipe( [prompt] , generator=_lowercase , output_type="np" , return_dict=_lowercase , num_inference_steps=2 )[0] a = image[0, -3:, -3:, -1] a = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) a = np.array([0.6_6_9_3, 0.6_0_7_5, 0.4_9_5_9, 0.5_7_0_1, 0.5_5_8_3, 0.4_3_3_3, 0.6_1_7_1, 0.5_6_8_4, 0.4_9_8_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class _lowercase ( unittest.TestCase ): def A ( self : Optional[int] ) -> List[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def A ( self : List[Any] ) -> List[str]: """simple docstring""" a = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy" ) a = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq" ) a = pipeline.to(_lowercase ) pipeline.set_progress_bar_config(disable=_lowercase ) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though a = torch.Generator(device=_lowercase ).manual_seed(0 ) a = pipeline( "teddy bear playing in the pool" , num_images_per_prompt=1 , generator=_lowercase , output_type="np" , ) a = output.images[0] assert image.shape == (256, 256, 3) assert np.abs(expected_image - image ).max() < 2.0
721
from __future__ import annotations


def is_9_pandigital(n: int) -> bool:
    """Return True if the digits of ``n`` are exactly 1..9, each used once.

    Bug fix: the original applied ``len``/``set`` to the int itself (and the
    function name was mangled, leaving the call site unresolved); convert to a
    string first.
    """
    s = str(n)
    return len(s) == 9 and set(s) == set("123456789")


def solution() -> int | None:
    """Project Euler 38: largest 1-9 pandigital concatenated product.

    Searches candidates of the form ``concat(n, 2n)`` (n has 4 digits, so the
    concatenation is n * 100002) and ``concat(n, 2n, 3n)`` (n has 3 digits,
    so it is n * 1002003), descending so the first hit is the maximum.
    """
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate

    return None


if __name__ == "__main__":
    print(f"""{solution() = }""")
32
0
def hamming(n_element: int) -> list:
    """Return the first ``n_element`` Hamming numbers (of the form 2^i * 3^j * 5^k).

    Uses the classic three-pointer merge: i, j, k track the smallest element
    whose multiple by 2, 3 or 5 respectively still exceeds the current maximum.

    :param n_element: how many Hamming numbers to generate (must be >= 1)
    :raises ValueError: if ``n_element`` < 1
    """
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        # advance each pointer past candidates already in the list
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
700
import os
import tempfile
import unittest

from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property


@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    """Smoke tests for the Tatoeba -> Marian conversion utilities.

    Skipped entirely unless the Tatoeba data repo has been checked out.
    """

    @cached_property
    def resolver(self):
        # Work in a throwaway directory so conversion artifacts don't pollute the repo.
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
32
0
import unittest

import numpy as np

from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
    expand_dims,
    flatten_dict,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    reshape,
    squeeze,
    transpose,
)


if is_flax_available():
    import jax.numpy as jnp

if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


class GenericTester(unittest.TestCase):
    """Checks that the framework-agnostic tensor helpers in ``transformers.utils``
    agree with the corresponding native numpy / torch / tf / jax operations."""

    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
701
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}


class LiltConfig(PretrainedConfig):
    """Configuration class for a LiLT model.

    Defaults mirror the ``SCUT-DLVCLab/lilt-roberta-en-base`` checkpoint.
    ``channel_shrink_ratio`` and ``max_2d_position_embeddings`` configure the
    layout (bounding-box) embedding branch.
    """

    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
32
0
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """Return True if some subset of ``arr`` sums to ``required_sum``.

    Classic dynamic-programming table: subset[i][j] is True when some subset
    of the first i values sums to j. O(len(arr) * required_sum) time/space.

    >>> is_sum_subset([2, 4, 6, 8], 5)
    False
    >>> is_sum_subset([2, 4, 6, 8], 14)
    True
    """
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for j in range(1, required_sum + 1):
        subset[0][j] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                # current value can't participate: inherit the answer without it
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                # either skip the value or use it against the remaining sum
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
702
import argparse

from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Build a T5 model from ``config_file``, load TF weights, save as PyTorch.

    :param tf_checkpoint_path: path to the TensorFlow checkpoint
    :param config_file: JSON config describing the model architecture
    :param pytorch_dump_path: output directory for the converted model
    """
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = TaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
32
0
def fibonacci(n: int) -> int:
    """Return the nth Fibonacci number with fib(1) = 0, fib(2) = 1.

    Non-int input falls back to 0 (kept from the original behaviour).
    """
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with ``n`` digits.

    NOTE: recomputes the sequence on every probe (O(index^2) overall);
    acceptable for the Project Euler input size.
    """
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    """Project Euler 25: index of the first Fibonacci term with ``n`` digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
703
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative ints as a binary string.

    The result is zero-padded to the width of the wider operand and carries a
    "0b" prefix, e.g. binary_and(25, 32) == '0b000000'.

    :raises ValueError: if either input is negative
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
32
0
import cv2
import numpy as np


class HarrisCorner:
    """Harris corner detector over a grayscale image."""

    def __init__(self, k: float, window_size: int):
        """
        k : Harris free parameter; only the empirical values 0.04 / 0.06 are accepted
        window_size : side length of the neighbourhood used for the structure tensor
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        """Return (annotated RGB image, [[x, y, r], ...]) for corners with response > 0.5."""
        img = cv2.imread(img_path, 0)  # read as grayscale
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        # NOTE(review): the original uses a local k = 0.04 here instead of self.k;
        # behaviour kept, but this ignores a constructor value of 0.06 — confirm intent.
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()

                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
704
from __future__ import annotations from collections.abc import Iterable, Iterator from dataclasses import dataclass A_ : List[str] = (3, 9, -11, 0, 7, 5, 1, -1) A_ : Optional[int] = (4, 6, 2, 0, 8, 10, 3, -2) @dataclass class _lowercase : _UpperCAmelCase = 42 _UpperCAmelCase = 42 class _lowercase : def __init__( self : List[Any] , __lowerCAmelCase : Iterable[int] ) -> None: """simple docstring""" a = None for i in sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ): a = Node(__lowerCAmelCase , self.head ) def __iter__( self : Union[str, Any] ) -> Iterator[int]: """simple docstring""" a = self.head while node: yield node.data a = node.next_node def __len__( self : Tuple ) -> int: """simple docstring""" return sum(1 for _ in self ) def __str__( self : Union[str, Any] ) -> str: """simple docstring""" return " -> ".join([str(__lowerCAmelCase ) for node in self] ) def UpperCAmelCase__ ( UpperCAmelCase__ :SortedLinkedList , UpperCAmelCase__ :SortedLinkedList ): '''simple docstring''' return SortedLinkedList(list(UpperCAmelCase__ ) + list(UpperCAmelCase__ ) ) if __name__ == "__main__": import doctest doctest.testmod() A_ : Optional[Any] = SortedLinkedList print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
32
0
import argparse
import json
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def replace_key_with_offset(key, offset, original_name, new_name):
    """Rename ``original_name`` inside ``key`` while shifting the block index by ``offset``."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    # the block and layer indices directly precede the first token of original_name
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(
        f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}"
    )
    return key


def rename_keys(state_dict):
    """Map original PoolFormer checkpoint keys onto the HF PoolFormer layout."""
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict


def prepare_img():
    """Download the standard COCO cats image used to verify conversion results."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Convert an original PoolFormer checkpoint to the HF format and verify logits."""
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="poolformer_s12",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
705
# Re-export the UnCLIP pipelines, falling back to dummy objects when the
# optional torch / transformers (>= 4.25.0) dependencies are missing.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
32
0
import math
import os
from copy import deepcopy

import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer

from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed


# NOTE(review): the obfuscated source only shows `<name> = "true"`; upstream
# accelerate sets this env var here — confirm against the original script.
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"


def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Return (model, ddp_model, dataloader) for a toy regression problem."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    """Build a tokenized GLUE/MRPC validation dataloader."""
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    """Return ({"ddp": ..., "no": ...}, accelerator) for the MRPC comparison test."""
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    """Run the model over the dataloader, gathering (logits, targets) across processes."""
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    """Check that gather_for_metrics returns exactly ``num_samples`` predictions."""
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    """Compare distributed metric computation against a single-process baseline."""
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
706
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for a FocalNet model.

    Defaults mirror the ``microsoft/focalnet-tiny`` checkpoint. The backbone
    mixin exposes per-stage output features/indices for downstream heads.
    """

    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
32
0
def text_justification(word: str, max_width: int) -> list:
    """Greedily wrap *word* (a sentence) into fully-justified lines of *max_width*.

    Each line except the last is padded with spaces between words (extra spaces
    go to the leftmost gaps); the last line is left-justified and space-padded.

    >>> text_justification("This is an example of text justification.", 16)
    ['This    is    an', 'example  of text', 'justification.  ']
    """
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        # Total padding that must be distributed across this line.
        overall_spaces_count = max_width - width
        words_count = len(line)
        if words_count == 1:
            # A single word: just pad the remainder of the line.
            return line[0] + " " * overall_spaces_count
        spaces_to_insert_between_words = words_count - 1
        # Even share of spaces per gap ...
        num_spaces_between_words_list = spaces_to_insert_between_words * [
            overall_spaces_count // spaces_to_insert_between_words
        ]
        spaces_count_in_locations = overall_spaces_count % spaces_to_insert_between_words
        # ... with the remainder handed out round-robin to the left gaps.
        for i in range(spaces_count_in_locations):
            num_spaces_between_words_list[i] += 1
        aligned_words_list = []
        for i in range(spaces_to_insert_between_words):
            aligned_words_list.append(line[i])
            aligned_words_list.append(num_spaces_between_words_list[i] * " ")
        aligned_words_list.append(line[-1])
        return "".join(aligned_words_list)

    answer = []
    line: list = []
    width = 0
    for inner_word in words:
        # width = sum of word lengths; len(line) = minimum single spaces needed.
        if width + len(inner_word) + len(line) <= max_width:
            line.append(inner_word)
            width += len(inner_word)
        else:
            # Current line is full: justify it and start a new one.
            answer.append(justify(line, width, max_width))
            line, width = [inner_word], len(inner_word)
    # Last line is left-justified: single spaces between words, padding at the end.
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


# The obfuscated original exposed this function under the name below; keep it
# importable for backward compatibility.
UpperCAmelCase__ = text_justification


if __name__ == "__main__":
    from doctest import testmod

    testmod()
707
def is_palindrome(head) -> bool:
    """O(1)-space check: split at the middle, reverse the second half, compare.

    Mutates the list (the second half is reversed and detached in place).
    ``head`` is any node object exposing ``.val`` and ``.next``.
    """
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # detach the first half
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts; the reversed half has the same length or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head) -> bool:
    """O(n)-space check: push the second half's values and pop-compare."""
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison: popping yields the second half reversed
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True


def is_palindrome_dict(head) -> bool:
    """Position-map check: each value's positions must mirror around the middle."""
    if not head or not head.next:
        return True
    d: dict = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1  # i and its mirror j satisfy i + j == len - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            # At most one value may appear an odd number of times (the middle).
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True


# The obfuscated original bound all three implementations to a single name; the
# last binding (the dict-based check) is what callers received, so this alias
# preserves that public behavior.
UpperCAmelCase__ = is_palindrome_dict
32
0
"""The :class:`Image` feature: encode/decode images to/from an Arrow struct of bytes + path."""
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import numpy as np
import pyarrow as pa

from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict


if TYPE_CHECKING:
    import PIL.Image

    from .features import FeatureType


# Lazily-initialized list of PIL formats supporting both open and save.
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
    np.dtype("|b1"),
    np.dtype("|u1"),
    np.dtype("<u2"),
    np.dtype(">u2"),
    np.dtype("<i2"),
    np.dtype(">i2"),
    np.dtype("<u4"),
    np.dtype(">u4"),
    np.dtype("<i4"),
    np.dtype(">i4"),
    np.dtype("<f4"),
    np.dtype(">f4"),
    np.dtype("<f8"),
    np.dtype(">f8"),
]


@dataclass
class Image:
    """Image feature. Stores samples as ``{"bytes": binary, "path": string}`` structs.

    ``decode=False`` disables decoding (raw dicts are returned by the dataset).
    """

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """Encode one example (path, raw bytes, array, PIL image or dict) into the storage dict."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")
        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode a storage dict into a PIL image, fetching remote paths via ``xopen``."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        # Hub-hosted file: look up the per-repo auth token.
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten to raw ``bytes``/``path`` values when decoding is disabled."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast string/binary/struct/list Arrow storage to the canonical bytes+path struct."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=storage.is_null()
            )
        elif pa.types.is_list(storage.type):
            # Nested lists are interpreted as image arrays and re-encoded to bytes.
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Inline the referenced files' bytes into storage, keeping only basenames as paths."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)


def list_image_compression_formats() -> List[str]:
    """Return (and cache) the PIL formats that support both reading and writing."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Serialize a PIL image; keep its native format when round-trippable, else PNG/TIFF."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    """Encode a PIL image as a storage dict, preferring its on-disk path when known."""
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}


def encode_np_array(array: np.ndarray) -> dict:
    """Encode a numpy array, downcasting to a Pillow-compatible dtype when needed."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:
        # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind")
        # doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype = np.dtype(dtype_byteorder + dtype_kind + str(dtype_itemsize))
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dest_dtype = None  # reset so exhausting the loop triggers the TypeError below
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}


def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]],
) -> List[dict]:
    """Encode a homogeneous list of image objects, preserving ``None`` entries."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    if objs:
        # Dispatch on the first non-null element's type.
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
708
import unittest

from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        MobileBertForMaskedLM,
        MobileBertForMultipleChoice,
        MobileBertForNextSentencePrediction,
        MobileBertForPreTraining,
        MobileBertForQuestionAnswering,
        MobileBertForSequenceClassification,
        MobileBertForTokenClassification,
        MobileBertModel,
    )


class MobileBertModelTester:
    """Builds tiny MobileBert configs/inputs and checks every head's output shapes."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return a config plus random ids/masks/labels sized for a tiny model."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Replicate each input across the choice dimension.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the obfuscated original held a bare True here; in the upstream
    # test suite this flag is ``fx_compatible`` — confirm against transformers.
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)


def _long_tensor(tok_lst):
    """Build a long tensor on the current test device."""
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-3


@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
        self.assertTrue(lower_bound and upper_bound)
32
0
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    """Configuration for the vision encoder of a GIT model (CLIP-style ViT)."""

    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this config from a checkpoint, unwrapping it from a full GIT config if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    """Configuration for a GIT model: a vision encoder config plus text-decoder hyperparameters."""

    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        """Serialize to a dict, expanding the nested vision config."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
709
import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class _lowercase ( UpperCAmelCase__ ): def A ( self : Optional[int] , __lowerCAmelCase : str ) -> Union[str, Any]: """simple docstring""" with open(__lowerCAmelCase , encoding="utf-8" ) as input_file: a = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" ) a = input_file.read() a = regexp.search(__lowerCAmelCase ) return match def A ( self : List[Any] , __lowerCAmelCase : str ) -> Dict: """simple docstring""" with open(__lowerCAmelCase , encoding="utf-8" ) as input_file: a = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL ) a = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` a = regexp.finditer(__lowerCAmelCase ) a = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def A ( self : List[str] ) -> List[Any]: """simple docstring""" a = Path("./datasets" ) a = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(__lowerCAmelCase ) ): raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" ) def A ( self : Tuple ) -> Union[str, Any]: """simple docstring""" a = Path("./datasets" ) a = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_print_statements(str(__lowerCAmelCase ) ): raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
32
0
"""Lazy import structure for the EfficientFormer model family.

NOTE(review): the obfuscated original assigned the import structure to
throwaway module globals while the final ``_LazyModule`` call read an
undefined ``_import_structure`` (NameError at import), and the imported
``sys`` module was never used because the lazy module object was never
installed into ``sys.modules``; both restored here.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

_import_structure = {
    "configuration_efficientformer": [
        "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientFormerConfig",
    ]
}

# Each optional backend contributes its submodule only when importable.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientformer"] = [
        "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientFormerForImageClassification",
        "EfficientFormerForImageClassificationWithTeacher",
        "EfficientFormerModel",
        "EfficientFormerPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
        "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFEfficientFormerForImageClassification",
        "TFEfficientFormerForImageClassificationWithTeacher",
        "TFEfficientFormerModel",
        "TFEfficientFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientformer import EfficientFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientformer import (
            EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientFormerForImageClassification,
            EfficientFormerForImageClassificationWithTeacher,
            EfficientFormerModel,
            EfficientFormerPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_efficientformer import (
            TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFEfficientFormerForImageClassification,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerModel,
            TFEfficientFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
710
"""Lazy import structure for the InstructBLIP model family.

NOTE(review): the obfuscated original bound the import structure to throwaway
globals while ``_LazyModule(..., _import_structure, ...)`` read an undefined
name, and never installed the lazy module into ``sys.modules``; restored here.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

# Modeling code is only exposed when torch is importable.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
32
0
"""Byte-level BPE tokenizer for BART.

NOTE(review): the obfuscated original defined two module functions under the
same name although the class calls ``bytes_to_unicode()`` and ``get_pairs()``,
bound every local/attribute to ``a`` while reading ``self.encoder``,
``self.bpe_ranks``, ``self.cache`` etc., and named every method ``A`` —
the PreTrainedTokenizer hook names and coherent bindings are restored here.
"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}


@lru_cache()
def bytes_to_unicode():
    """Return a mapping from utf-8 byte values to printable unicode characters.

    Printable bytes map to themselves; the remaining bytes are shifted above
    2**8 so that no BPE control token collides with a real byte.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word* (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class _lowercase(PreTrainedTokenizer):
    """BART tokenizer, a GPT-2-style byte-level Byte-Pair-Encoding tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Merge *token*'s symbols greedily by merge rank, with memoization."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split *text* with the BPE pre-tokenizer regex and apply byte-level BPE."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Undo the byte-level encoding back to a utf-8 string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt into *save_directory* and return their paths."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """``<s> A </s>`` for one sequence, ``<s> A </s></s> B </s>`` for a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """BART does not use token type ids — return all zeros of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the byte-level BPE treats the first word like any other."""
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
711
"""Tests for the UniPC multistep scheduler.

NOTE(review): the obfuscated original subclassed an undefined name (the
imported, otherwise-unused ``SchedulerCommonTest`` is the grounded fix), bound
every local to ``a`` while reading ``scheduler``/``output``/``result_mean``,
and named every method ``A`` although the tests call
``self.get_scheduler_config`` / ``self.check_over_configs`` /
``self.check_over_forward`` / ``self.full_loop``; all restored here.
"""
import tempfile

import torch

from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    DPMSolverSinglestepScheduler,
    UniPCMultistepScheduler,
)

from .test_schedulers import SchedulerCommonTest


class _lowercase(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        """Default UniPC config, overridable via keyword arguments."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """Save/reload the scheduler and verify step outputs are identical."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Save/reload the scheduler and verify a single forward step matches."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        """Run a full 10-step denoising loop and return the final sample.

        NOTE(review): mirroring the original control flow, the scheduler built
        after the ``if`` block overwrites any scheduler passed in (only its
        *config* effectively survives via ``from_config`` round-trips).
        """
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # Round-tripping the config through sibling schedulers must not change results.
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    sample = self.full_loop(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1014) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        # the input was halved above, so the output must stay fp16
        assert sample.dtype == torch.float16

    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
32
0
"""Utilities for token-classification (NER) fine-tuning.

NOTE(review): the obfuscated original declared five classes under the single
name ``_lowercase`` while the code references ``InputExample``,
``InputFeatures`` and ``Split.train``; dataclass fields were all named
``_UpperCAmelCase = 42``; and locals were bound to ``a`` while being read as
``tokens``/``label_ids``/``features`` etc. Coherent names restored here — the
two Dataset class names are taken from the upstream examples and should be
confirmed against callers.
"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union

from filelock import FileLock

from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available


logger = logging.getLogger(__name__)


@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str  # unique example id
    words: List[str]  # whitespace-split input words
    labels: Optional[List[str]]  # one label per word (None at inference)


@dataclass
class InputFeatures:
    """A single set of numeric features fed to the model."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    """Abstract task: how to read examples/labels and turn them into features."""

    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        """Tokenize each example, align labels to sub-tokens, truncate and pad."""
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features


if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        # Use the cross-entropy ignore_index so padding positions never contribute to the loss.
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or build them from the dataset file.
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,  # NOTE(review): obfuscated in source — confirm for roberta-style models
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]


if is_tf_available():
    import tensorflow as tf

    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        # Same ignore index as the torch dataset so both paths mask padding labels.
        pad_token_label_id: int = -100

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,  # NOTE(review): obfuscated in source — confirm for roberta-style models
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
712
import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _lowercase : def __init__( self : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int]=13 , __lowerCAmelCase : str=32 , __lowerCAmelCase : str=3 , __lowerCAmelCase : int=4 , __lowerCAmelCase : List[str]=[10, 20, 30, 40] , __lowerCAmelCase : Any=[2, 2, 3, 2] , __lowerCAmelCase : Any=True , __lowerCAmelCase : int=True , __lowerCAmelCase : str=37 , __lowerCAmelCase : List[Any]="gelu" , __lowerCAmelCase : int=10 , __lowerCAmelCase : str=0.0_2 , __lowerCAmelCase : int=["stage2", "stage3", "stage4"] , __lowerCAmelCase : List[str]=[2, 3, 4] , __lowerCAmelCase : str=None , ) -> Optional[Any]: """simple docstring""" a = parent a = batch_size a = image_size a = num_channels a = num_stages a = hidden_sizes a = depths a = is_training a = use_labels a = intermediate_size a = hidden_act a = num_labels a = initializer_range a = out_features a = out_indices a = scope def A ( self : Optional[Any] ) -> int: """simple docstring""" a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a = None if self.use_labels: 
a = ids_tensor([self.batch_size] , self.num_labels ) a = self.get_config() return config, pixel_values, labels def A ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def A ( self : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict ) -> Optional[int]: """simple docstring""" a = ConvNextVaModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def A ( self : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] ) -> Dict: """simple docstring""" a = ConvNextVaForImageClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] ) -> int: """simple docstring""" a = ConvNextVaBackbone(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) 
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None a = None a = ConvNextVaBackbone(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def A ( self : Union[str, Any] ) -> Dict: """simple docstring""" a = self.prepare_config_and_inputs() a , a , a = config_and_inputs a = {"pixel_values": pixel_values} return config, inputs_dict def A ( self : Dict ) -> Optional[int]: """simple docstring""" a = self.prepare_config_and_inputs() a , a , a = config_and_inputs a = {"pixel_values": pixel_values, "labels": labels} return config, inputs_dict @require_torch class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ): _UpperCAmelCase = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) _UpperCAmelCase = ( {'''feature-extraction''': ConvNextVaModel, '''image-classification''': ConvNextVaForImageClassification} if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def A ( self : List[str] ) -> List[Any]: """simple docstring""" a = ConvNextVaModelTester(self ) a = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37 ) def A ( self : Tuple ) -> Dict: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() 
self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A ( self : List[Any] ) -> List[Any]: """simple docstring""" return @unittest.skip(reason="ConvNextV2 does not use inputs_embeds" ) def A ( self : List[Any] ) -> List[Any]: """simple docstring""" pass @unittest.skip(reason="ConvNextV2 does not support input and output embeddings" ) def A ( self : int ) -> List[Any]: """simple docstring""" pass @unittest.skip(reason="ConvNextV2 does not use feedforward chunking" ) def A ( self : Optional[int] ) -> Dict: """simple docstring""" pass def A ( self : List[str] ) -> List[str]: """simple docstring""" if not self.model_tester.is_training: return for model_class in self.all_model_classes: a , a = self.model_tester.prepare_config_and_inputs_with_labels() a = True if model_class.__name__ in [ *get_values(__lowerCAmelCase ), *get_values(__lowerCAmelCase ), ]: continue a = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() a = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) a = model(**__lowerCAmelCase ).loss loss.backward() def A ( self : Optional[int] ) -> List[str]: """simple docstring""" if not self.model_tester.is_training: return for model_class in self.all_model_classes: a , a = self.model_tester.prepare_config_and_inputs_with_labels() a = False a = True if ( model_class.__name__ in [*get_values(__lowerCAmelCase ), *get_values(__lowerCAmelCase )] or not model_class.supports_gradient_checkpointing ): continue a = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.gradient_checkpointing_enable() model.train() a = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) a = model(**__lowerCAmelCase ).loss loss.backward() def A ( self : List[Any] ) -> Any: """simple docstring""" a , a = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = model_class(__lowerCAmelCase ) a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a = [*signature.parameters.keys()] a = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) def A ( self : Dict ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def A ( self : Tuple ) -> List[Any]: """simple docstring""" def check_hidden_states_output(__lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple ): a = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() with torch.no_grad(): a = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) ) a = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states a = self.model_tester.num_stages self.assertEqual(len(__lowerCAmelCase ) , expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) a , a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = True check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] a = True check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def A ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase ) @slow def A ( self : Tuple ) -> List[str]: """simple docstring""" for model_name in 
CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a = ConvNextVaModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def UpperCAmelCase__ ( ): '''simple docstring''' a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _lowercase ( unittest.TestCase ): @cached_property def A ( self : Optional[int] ) -> str: """simple docstring""" return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None @slow def A ( self : List[str] ) -> Union[str, Any]: """simple docstring""" a = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(__lowerCAmelCase ) a = self.default_image_processor a = prepare_img() a = preprocessor(images=__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase ) # forward pass with torch.no_grad(): a = model(**__lowerCAmelCase ) # verify the logits a = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __lowerCAmelCase ) a = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) )
32
0
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


A_ : List[Any] = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config URL.
A_ : str = {
    '''edbeeching/decision-transformer-gym-hopper-medium''': (
        '''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class _lowercase ( UpperCAmelCase__ ):
    # Configuration for a GPT-2-style Decision Transformer backbone.
    # NOTE(review): identifiers in this file are machine-mangled — the class
    # name, its base (`UpperCAmelCase__`, presumably PretrainedConfig), the
    # three class attributes below (model_type, keys_to_ignore_at_inference,
    # attribute_map upstream) and every parameter name were rewritten.
    # Verify against the original module before relying on them.
    _UpperCAmelCase = 'decision_transformer'
    _UpperCAmelCase = ['past_key_values']
    _UpperCAmelCase = {
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    # NOTE(review): the mangling gave every parameter the same name
    # (`__lowerCAmelCase`), which is not valid Python; the right-hand sides of
    # the assignments below record the intended parameter order:
    # state_dim=17, act_dim=4, hidden_size=128, max_ep_len=4096,
    # action_tanh=True, vocab_size=1, n_positions=1024, n_layer=3, n_head=1,
    # n_inner=None, activation_function="relu", resid_pdrop=0.1,
    # embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5,
    # initializer_range=0.02, scale_attn_weights=True, use_cache=True,
    # bos_token_id=50256, eos_token_id=50256,
    # scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False.
    def __init__( self : int , __lowerCAmelCase : Tuple=17 , __lowerCAmelCase : List[Any]=4 , __lowerCAmelCase : Tuple=128 , __lowerCAmelCase : Optional[Any]=4096 , __lowerCAmelCase : Any=True , __lowerCAmelCase : int=1 , __lowerCAmelCase : str=1024 , __lowerCAmelCase : Optional[Any]=3 , __lowerCAmelCase : Tuple=1 , __lowerCAmelCase : Any=None , __lowerCAmelCase : Optional[Any]="relu" , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : int=1E-5 , __lowerCAmelCase : Union[str, Any]=0.0_2 , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : Optional[int]=5_0256 , __lowerCAmelCase : str=5_0256 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Tuple=False , **__lowerCAmelCase : str , ) -> List[str]:
        """Store the hyper-parameters and forward token ids to the base config."""
        # NOTE(review): each `a = ...` below was presumably `self.<attr> = ...`
        # before mangling — confirm against upstream.
        a = state_dim
        a = act_dim
        a = hidden_size
        a = max_ep_len
        a = action_tanh
        a = vocab_size
        a = n_positions
        a = n_layer
        a = n_head
        a = n_inner
        a = activation_function
        a = resid_pdrop
        a = embd_pdrop
        a = attn_pdrop
        a = layer_norm_epsilon
        a = initializer_range
        a = scale_attn_weights
        a = use_cache
        a = scale_attn_by_inverse_layer_idx
        a = reorder_and_upcast_attn
        a = bos_token_id
        a = eos_token_id
        # Special-token ids are handled by the PretrainedConfig base class.
        super().__init__(bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
713
import copy import os import cva import numpy as np from matplotlib import pyplot as plt class _lowercase : def __init__( self : List[str] ) -> List[str]: """simple docstring""" a = "" a = "" a = [] a = 0 a = 256 a = 0 a = 0 a = 0 a = 0 def A ( self : Optional[Any] , __lowerCAmelCase : Any ) -> int: """simple docstring""" a = cva.imread(__lowerCAmelCase , 0 ) a = copy.deepcopy(self.img ) a , a , a = plt.hist(self.img.ravel() , 256 , [0, 256] , label="x" ) a = np.sum(__lowerCAmelCase ) for i in range(len(__lowerCAmelCase ) ): a = x[i] / self.k self.sk += prk a = (self.L - 1) * self.sk if self.rem != 0: a = int(last % last ) a = int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(__lowerCAmelCase ) a = int(np.ma.count(self.img ) / self.img[1].size ) a = self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): a = self.img[j][i] if num != self.last_list[num]: a = self.last_list[num] cva.imwrite("output_data/output.jpg" , self.img ) def A ( self : Any ) -> int: """simple docstring""" plt.hist(self.img.ravel() , 256 , [0, 256] ) def A ( self : Any ) -> int: """simple docstring""" cva.imshow("Output-Image" , self.img ) cva.imshow("Input-Image" , self.original_image ) cva.waitKey(5000 ) cva.destroyAllWindows() if __name__ == "__main__": A_ : List[Any] = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''') A_ : int = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
32
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A_ : str = { '''configuration_clipseg''': [ '''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CLIPSegConfig''', '''CLIPSegTextConfig''', '''CLIPSegVisionConfig''', ], '''processing_clipseg''': ['''CLIPSegProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : Tuple = [ '''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CLIPSegModel''', '''CLIPSegPreTrainedModel''', '''CLIPSegTextModel''', '''CLIPSegVisionModel''', '''CLIPSegForImageSegmentation''', ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys A_ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
714
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class _lowercase ( UpperCAmelCase__ ):
    # Unconditional image-generation pipeline for score-based SDE (VE) models.
    # NOTE(review): names are machine-mangled — the base `UpperCAmelCase__` is
    # presumably DiffusionPipeline, the two `_UpperCAmelCase = 42` lines were
    # typed attribute declarations (unet: UNet2DModel,
    # scheduler: ScoreSdeVeScheduler), and the `a = ...` assignments below
    # were named locals.  Verify against upstream.
    _UpperCAmelCase = 42
    _UpperCAmelCase = 42

    def __init__( self : Optional[Any] , __lowerCAmelCase : UNetaDModel , __lowerCAmelCase : ScoreSdeVeScheduler ) -> str:
        """Register the denoising UNet and the VE-SDE scheduler as modules."""
        super().__init__()
        self.register_modules(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase )

    # NOTE(review): mangling gave every parameter the same name; upstream
    # order is batch_size=1, num_inference_steps=2000, generator=None,
    # output_type="pil", return_dict=True, **kwargs.
    @torch.no_grad()
    def __call__( self : int , __lowerCAmelCase : int = 1 , __lowerCAmelCase : int = 2000 , __lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCAmelCase : Optional[str] = "pil" , __lowerCAmelCase : bool = True , **__lowerCAmelCase : Any , ) -> Union[ImagePipelineOutput, Tuple]:
        """Run predictor-corrector sampling and return the generated images."""
        a = self.unet.config.sample_size
        # 3-channel square images at the model's native resolution.
        a = (batch_size, 3, img_size, img_size)
        a = self.unet
        # Start from noise scaled by the scheduler's initial sigma.
        a = randn_tensor(__lowerCAmelCase , generator=__lowerCAmelCase ) * self.scheduler.init_noise_sigma
        a = sample.to(self.device )
        self.scheduler.set_timesteps(__lowerCAmelCase )
        self.scheduler.set_sigmas(__lowerCAmelCase )
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            a = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
            # correction step (Langevin corrector, `correct_steps` iterations)
            for _ in range(self.scheduler.config.correct_steps ):
                a = self.unet(__lowerCAmelCase , __lowerCAmelCase ).sample
                a = self.scheduler.step_correct(__lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ).prev_sample

            # prediction step (reverse-SDE predictor)
            a = model(__lowerCAmelCase , __lowerCAmelCase ).sample
            a = self.scheduler.step_pred(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase )

            a , a = output.prev_sample, output.prev_sample_mean

        # Final output uses the noise-free mean, clamped to valid pixel range,
        # converted to channel-last numpy (B, H, W, C).
        a = sample_mean.clamp(0 , 1 )
        a = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            a = self.numpy_to_pil(__lowerCAmelCase )

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=__lowerCAmelCase )
32
0
def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[int] ): '''simple docstring''' if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or number < 0: raise ValueError("Input must be a non-negative integer" ) a = 0 while number: # This way we arrive at next set bit (next 1) instead of looping # through each bit and checking for 1s hence the # loop won't run 32 times it will only run the number of `1` times number &= number - 1 count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
715
A_ : Any = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] A_ : Tuple = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] A_ : Optional[int] = { 0: '''Sunday''', 1: '''Monday''', 2: '''Tuesday''', 3: '''Wednesday''', 4: '''Thursday''', 5: '''Friday''', 6: '''Saturday''', } def UpperCAmelCase__ ( UpperCAmelCase__ :int , UpperCAmelCase__ :int , UpperCAmelCase__ :int ): '''simple docstring''' assert len(str(UpperCAmelCase__ ) ) > 2, "year should be in YYYY format" assert 1 <= month <= 12, "month should be between 1 to 12" assert 1 <= day <= 31, "day should be between 1 to 31" # Doomsday algorithm: a = year // 1_00 a = (5 * (century % 4) + 2) % 7 a = year % 1_00 a = centurian % 12 a = ( (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor ) % 7 a = ( DOOMSDAY_NOT_LEAP[month - 1] if (year % 4 != 0) or (centurian == 0 and (year % 4_00) == 0) else DOOMSDAY_LEAP[month - 1] ) a = (dooms_day + day - day_anchor) % 7 return WEEK_DAY_NAMES[week_day] if __name__ == "__main__": import doctest doctest.testmod()
32
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Import structure for lazy loading: submodule name -> public symbols.
# NOTE(review): this dict and the optional-symbol lists below are all bound
# to the mangled name `A_`, yet the _LazyModule call at the bottom passes
# `_import_structure`, which is never defined in this file; the lists should
# be `_import_structure["tokenization_mbart"] = [...]` etc.  Restore those
# names for the lazy wiring to work.
A_ : Dict = {'''configuration_mbart''': ['''MBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MBartConfig''', '''MBartOnnxConfig''']}

# Each block below registers symbols only when the optional backend exists.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A_ : Tuple = ['''MBartTokenizer''']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A_ : Union[str, Any] = ['''MBartTokenizerFast''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A_ : Dict = [
        '''MBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''MBartForCausalLM''',
        '''MBartForConditionalGeneration''',
        '''MBartForQuestionAnswering''',
        '''MBartForSequenceClassification''',
        '''MBartModel''',
        '''MBartPreTrainedModel''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A_ : int = [
        '''TFMBartForConditionalGeneration''',
        '''TFMBartModel''',
        '''TFMBartPreTrainedModel''',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A_ : Optional[int] = [
        '''FlaxMBartForConditionalGeneration''',
        '''FlaxMBartForQuestionAnswering''',
        '''FlaxMBartForSequenceClassification''',
        '''FlaxMBartModel''',
        '''FlaxMBartPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports...
    from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart import MBartTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart_fast import MBartTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mbart import (
            MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            MBartForCausalLM,
            MBartForConditionalGeneration,
            MBartForQuestionAnswering,
            MBartForSequenceClassification,
            MBartModel,
            MBartPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mbart import (
            FlaxMBartForConditionalGeneration,
            FlaxMBartForQuestionAnswering,
            FlaxMBartForSequenceClassification,
            FlaxMBartModel,
            FlaxMBartPreTrainedModel,
        )

else:
    import sys

    # ...while at runtime the module object is replaced by a lazy proxy that
    # imports submodules on first attribute access.
    A_ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
716
import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process A_ : int = logging.getLogger(__name__) @dataclass class _lowercase : _UpperCAmelCase = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) _UpperCAmelCase = field( default=UpperCAmelCase__, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) _UpperCAmelCase = field( default='''NER''', metadata={'''help''': '''Task type to fine tune in training (e.g. NER, POS, etc)'''} ) _UpperCAmelCase = field( default=UpperCAmelCase__, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) _UpperCAmelCase = field(default=UpperCAmelCase__, metadata={'''help''': '''Set this flag to use fast tokenization.'''} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _UpperCAmelCase = field( default=UpperCAmelCase__, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, ) @dataclass class _lowercase : _UpperCAmelCase = field( metadata={'''help''': '''The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'''} ) _UpperCAmelCase = field( default=UpperCAmelCase__, metadata={'''help''': '''Path to a file containing all labels. 
If not specified, CoNLL-2003 labels are used.'''}, ) _UpperCAmelCase = field( default=128, metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) }, ) _UpperCAmelCase = field( default=UpperCAmelCase__, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) def UpperCAmelCase__ ( ): '''simple docstring''' a = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. a , a , a = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: a , a , a = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" " --overwrite_output_dir to overcome." ) a = import_module("tasks" ) try: a = getattr(UpperCAmelCase__ , model_args.task_type ) a = token_classification_task_clazz() except AttributeError: raise ValueError( F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. 
""" F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("Training/evaluation parameters %s" , UpperCAmelCase__ ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task a = token_classification_task.get_labels(data_args.labels ) a = dict(enumerate(UpperCAmelCase__ ) ) a = len(UpperCAmelCase__ ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
a = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase__ , idalabel=UpperCAmelCase__ , labelaid={label: i for i, label in enumerate(UpperCAmelCase__ )} , cache_dir=model_args.cache_dir , ) a = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , ) a = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , ) # Get datasets a = ( TokenClassificationDataset( token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) a = ( TokenClassificationDataset( token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def align_predictions(UpperCAmelCase__ :np.ndarray , UpperCAmelCase__ :np.ndarray ) -> Tuple[List[int], List[int]]: a = np.argmax(UpperCAmelCase__ , axis=2 ) a , a = preds.shape a = [[] for _ in range(UpperCAmelCase__ )] a = [[] for _ in range(UpperCAmelCase__ )] for i in range(UpperCAmelCase__ ): for j in range(UpperCAmelCase__ ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(UpperCAmelCase__ :EvalPrediction ) -> Dict: a , a = align_predictions(p.predictions , 
p.label_ids ) return { "accuracy_score": accuracy_score(UpperCAmelCase__ , UpperCAmelCase__ ), "precision": precision_score(UpperCAmelCase__ , UpperCAmelCase__ ), "recall": recall_score(UpperCAmelCase__ , UpperCAmelCase__ ), "f1": fa_score(UpperCAmelCase__ , UpperCAmelCase__ ), } # Data collator a = DataCollatorWithPadding(UpperCAmelCase__ , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer a = Trainer( model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=UpperCAmelCase__ , eval_dataset=UpperCAmelCase__ , compute_metrics=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation a = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) a = trainer.evaluate() a = os.path.join(training_args.output_dir , "eval_results.txt" ) if trainer.is_world_process_zero(): with open(UpperCAmelCase__ , "w" ) as writer: logger.info("***** Eval results *****" ) for key, value in result.items(): logger.info(" %s = %s" , UpperCAmelCase__ , UpperCAmelCase__ ) writer.write("%s = %s\n" % (key, value) ) results.update(UpperCAmelCase__ ) # Predict if training_args.do_predict: a = TokenClassificationDataset( token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , ) a , a , a = trainer.predict(UpperCAmelCase__ ) a , a = align_predictions(UpperCAmelCase__ , UpperCAmelCase__ ) a = os.path.join(training_args.output_dir , 
"test_results.txt" ) if trainer.is_world_process_zero(): with open(UpperCAmelCase__ , "w" ) as writer: for key, value in metrics.items(): logger.info(" %s = %s" , UpperCAmelCase__ , UpperCAmelCase__ ) writer.write("%s = %s\n" % (key, value) ) # Save predictions a = os.path.join(training_args.output_dir , "test_predictions.txt" ) if trainer.is_world_process_zero(): with open(UpperCAmelCase__ , "w" ) as writer: with open(os.path.join(data_args.data_dir , "test.txt" ) , "r" ) as f: token_classification_task.write_predictions_to_file(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) return results def UpperCAmelCase__ ( UpperCAmelCase__ :Tuple ): '''simple docstring''' main() if __name__ == "__main__": main()
32
0
from arguments import InitializationArguments

from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser


# Script: initialize a fresh (untrained) codeparrot causal-LM from a base config
# and optionally push it to the Hub.
# NOTE(review): the original bound every result to a throwaway name `A_` while the
# following lines read `parser`, `args`, `tokenizer`, `config_kwargs`, `config` and
# `model` — a guaranteed NameError. Each value is now bound to the name the rest of
# the script actually uses.

# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
717
from ...configuration_utils import PretrainedConfig
from ...utils import logging


A_ : str = logging.get_logger(__name__)

# Hub model id -> location of its config.json.
A_ : List[Any] = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class _lowercase(PretrainedConfig):
    """Configuration for an RWKV model.

    Stores the hyper-parameters read by the RWKV modeling code; inherits the
    generic serialization / Hub behavior from `PretrainedConfig`.
    """

    # `model_type` and `attribute_map` are read by the PretrainedConfig machinery.
    # NOTE(review): the original bound both values to the same mangled name
    # `_UpperCAmelCase` (the second overwrote the first), disabling that machinery.
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size: int = 50277,
        context_length: int = 1024,
        hidden_size: int = 4096,
        num_hidden_layers: int = 32,
        attention_hidden_size: int = None,
        intermediate_size: int = None,
        layer_norm_epsilon: float = 1e-5,
        bos_token_id: int = 0,
        eos_token_id: int = 0,
        rescale_every: int = 6,
        tie_word_embeddings: bool = False,
        use_cache: bool = True,
        **kwargs,
    ):
        """Build the config.

        NOTE(review): the original signature named every parameter
        `__lowerCAmelCase` (a SyntaxError) and assigned each value to a local
        `a`, so the config stored nothing. Parameter names are reconstructed
        from the attribute order and defaults.
        """
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Both sizes default relative to hidden_size when not given explicitly.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
32
0
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    # NOTE(review): the original bound `A_` here, leaving `BigBirdTokenizer`
    # undefined (NameError at class-body evaluation) when sentencepiece is absent.
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

# Module constants below are read by name later (class attributes, save_vocabulary);
# the original bound them to throwaway `A_` names.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}

SPIECE_UNDERLINE = "▁"


class _lowercase(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) BigBird tokenizer.

    NOTE(review): the original's base class was the undefined name `__a`, and
    all four methods below were named `A` (so only the last survived and none
    overrode the `PreTrainedTokenizerFast` API). Names are restored from the
    base-class contract.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        # Plain special tokens are wrapped without stripping surrounding space.
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # A slow tokenizer can only be re-created when the sentencepiece model exists.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None
    ) -> List[int]:
        """Add `[CLS] ... [SEP]` (and `... [SEP]` for a pair) around the input ids."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a_pair + sep

    def get_special_tokens_mask(
        self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            if token_ids_a_pair is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]

        if token_ids_a_pair is None:
            return [1] + ([0] * len(token_ids_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1] + ([0] * len(token_ids_a_pair)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: 0 for the first sequence (with its specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_pair + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model into `save_directory`; returns the written path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        # Avoid copying a file onto itself when saving in place.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
718
from math import ceil
from typing import List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging


A_ : List[str] = logging.get_logger(__name__)
logger = A_


class _lowercase(SequenceFeatureExtractor):
    """Audio feature extractor: waveform -> padded log-mel spectrogram patches
    plus an attention mask over the patch grid.

    NOTE(review): the original named every `__init__` parameter `__lowerCAmelCase`
    (a SyntaxError) and collapsed all `self.x = ...` assignments to `a = ...`;
    names are reconstructed from the attribute reads in the other methods.
    """

    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )

        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        # Number of patches along the frequency axis.
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        """Compute a normalized log-mel spectrogram (values roughly in [-1, 1])."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one waveform or a batch of waveforms into `audio_values`
        (and optionally `audio_mask`), padded to the longest example."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
                    f""" with {self.sampling_rate} and not {sampling_rate}."""
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length]
            for waveform in raw_speech
        ]
        if isinstance(audio_features[0], List):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            # NOTE(review): the mangled source collapsed this indexed store; the
            # left-pad-to-max layout is reconstructed — confirm against callers.
            padded_audio_features[i, 0, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
32
0
def UpperCAmelCase__(first: int, second: int) -> int:
    """
    Add two non-negative integers without the `+` operator, using only
    bitwise AND/XOR and shifts (carry propagation).

    Restricted to non-negative ints: with Python's unbounded integers a
    negative carry would shift left forever.

    >>> UpperCAmelCase__(3, 5)
    8
    >>> UpperCAmelCase__(0, 0)
    0

    NOTE(review): the original had both parameters named identically
    (SyntaxError), read the carry from an undefined name `c`, and never
    updated `second`, so it could not terminate. All three are fixed.
    """
    while second != 0:
        carry = first & second  # bits that generate a carry
        first ^= second  # sum without carries
        second = carry << 1  # propagate the carry one position left
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{UpperCAmelCase__(first, second) = }")
719
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel

if is_vision_available():
    from transformers import MaskFormerImageProcessor

if is_vision_available():
    from PIL import Image


# NOTE(review): the mangled source named all three classes `_lowercase` (so they
# shadowed each other) while instantiating `MaskFormerModelTester(self)` and
# calling `prepare_img()` by name; class/function names are restored from those
# call sites and method names from the common-tester contract.
class MaskFormerModelTester:
    """Builds small random configs/inputs and checks MaskFormer model outputs."""

    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

        comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))


@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    # NOTE(review): the mangled source had four identically named `False` flags;
    # these names follow the common-tester convention — confirm against upstream.
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)


TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )

    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        pixel_values = inputs["pixel_values"].to(torch_device)
        mask_labels = [el.to(torch_device) for el in inputs["mask_labels"]]
        class_labels = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        self.assertTrue(outputs.loss is not None)
32
0
import argparse
import os

import torch

from transformers import FlavaImageCodebook, FlavaImageCodebookConfig


def rreplace(s, old, new, occurrence):
    """Replace the last *occurrence* occurrences of *old* in *s* with *new*.

    (The mangled original gave all four parameters the same name, which is a
    SyntaxError; distinct names restored.)
    """
    parts = s.rsplit(old, occurrence)
    return new.join(parts)


def count_parameters(state_dict):
    """Sum all parameter values in *state_dict*, skipping encoder.embeddings.

    The embeddings are excluded because they are reorganised during
    conversion and would not match between the two checkpoints.
    """
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    """Rename DALL-E dVAE state-dict keys to the HF FLAVA codebook layout.

    Returns a new dict; values are cast to float32.
    """
    upgrade = {}
    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                # e.g. "group_1.conv" -> "group_1.group.conv"
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    """Convert a DALL-E dVAE encoder checkpoint to a HF FlavaImageCodebook.

    checkpoint_path may be a local path or a URL. When save_checkpoint is
    False the converted state dict is returned instead of being written out.
    """
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    # The published checkpoint may be a pickled Encoder module rather than a
    # plain state dict.
    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()

    # Sanity check: total parameter mass must survive the key renaming.
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
720
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class DisjunctiveConstraintTest(unittest.TestCase):
    """Tests for DisjunctiveConstraint.

    The mangled original named every method ``A``: later defs shadowed
    earlier ones and unittest (which discovers ``test_*`` names) would run
    none of them. Descriptive ``test_*`` names restored.
    """

    def test_input_types(self):
        # dc.token_ids must be a plain list of ints; tensor input is rejected.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # One branch may not be a strict prefix of another: fulfilment of the
        # shorter branch would be ambiguous.
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
32
0
from __future__ import annotations from collections.abc import Sequence from typing import Literal def UpperCAmelCase__ ( UpperCAmelCase__ :str , UpperCAmelCase__ :str ): '''simple docstring''' a = list(UpperCAmelCase__ ) a = list(UpperCAmelCase__ ) a = 0 for i in range(len(UpperCAmelCase__ ) ): if lista[i] != lista[i]: count += 1 a = "_" if count > 1: return False else: return "".join(UpperCAmelCase__ ) def UpperCAmelCase__ ( UpperCAmelCase__ :list[str] ): '''simple docstring''' a = [] while True: a = ["$"] * len(UpperCAmelCase__ ) a = [] for i in range(len(UpperCAmelCase__ ) ): for j in range(i + 1 , len(UpperCAmelCase__ ) ): a = compare_string(binary[i] , binary[j] ) if k is False: a = "*" a = "*" temp.append("X" ) for i in range(len(UpperCAmelCase__ ) ): if checka[i] == "$": pi.append(binary[i] ) if len(UpperCAmelCase__ ) == 0: return pi a = list(set(UpperCAmelCase__ ) ) def UpperCAmelCase__ ( UpperCAmelCase__ :int , UpperCAmelCase__ :Sequence[float] ): '''simple docstring''' a = [] for minterm in minterms: a = "" for _ in range(UpperCAmelCase__ ): a = str(minterm % 2 ) + string minterm //= 2 temp.append(UpperCAmelCase__ ) return temp def UpperCAmelCase__ ( UpperCAmelCase__ :str , UpperCAmelCase__ :str , UpperCAmelCase__ :int ): '''simple docstring''' a = list(UpperCAmelCase__ ) a = list(UpperCAmelCase__ ) a = 0 for i in range(len(UpperCAmelCase__ ) ): if lista[i] != lista[i]: count_n += 1 return count_n == count def UpperCAmelCase__ ( UpperCAmelCase__ :list[list[int]] , UpperCAmelCase__ :list[str] ): '''simple docstring''' a = [] a = [0] * len(UpperCAmelCase__ ) for i in range(len(chart[0] ) ): a = 0 a = -1 for j in range(len(UpperCAmelCase__ ) ): if chart[j][i] == 1: count += 1 a = j if count == 1: a = 1 for i in range(len(UpperCAmelCase__ ) ): if select[i] == 1: for j in range(len(chart[0] ) ): if chart[i][j] == 1: for k in range(len(UpperCAmelCase__ ) ): a = 0 temp.append(prime_implicants[i] ) while True: a = 0 a = -1 a = 0 for i in 
range(len(UpperCAmelCase__ ) ): a = chart[i].count(1 ) if count_n > max_n: a = count_n a = i if max_n == 0: return temp temp.append(prime_implicants[rem] ) for i in range(len(chart[0] ) ): if chart[rem][i] == 1: for j in range(len(UpperCAmelCase__ ) ): a = 0 def UpperCAmelCase__ ( UpperCAmelCase__ :list[str] , UpperCAmelCase__ :list[str] ): '''simple docstring''' a = [[0 for x in range(len(UpperCAmelCase__ ) )] for x in range(len(UpperCAmelCase__ ) )] for i in range(len(UpperCAmelCase__ ) ): a = prime_implicants[i].count("_" ) for j in range(len(UpperCAmelCase__ ) ): if is_for_table(prime_implicants[i] , binary[j] , UpperCAmelCase__ ): a = 1 return chart def UpperCAmelCase__ ( ): '''simple docstring''' a = int(input("Enter the no. of variables\n" ) ) a = [ float(UpperCAmelCase__ ) for x in input( "Enter the decimal representation of Minterms \'Spaces Separated\'\n" ).split() ] a = decimal_to_binary(UpperCAmelCase__ , UpperCAmelCase__ ) a = check(UpperCAmelCase__ ) print("Prime Implicants are:" ) print(UpperCAmelCase__ ) a = prime_implicant_chart(UpperCAmelCase__ , UpperCAmelCase__ ) a = selection(UpperCAmelCase__ , UpperCAmelCase__ ) print("Essential Prime Implicants are:" ) print(UpperCAmelCase__ ) if __name__ == "__main__": import doctest doctest.testmod() main()
721
from __future__ import annotations def UpperCAmelCase__ ( UpperCAmelCase__ :int ): '''simple docstring''' a = str(UpperCAmelCase__ ) return len(UpperCAmelCase__ ) == 9 and set(UpperCAmelCase__ ) == set("123456789" ) def UpperCAmelCase__ ( ): '''simple docstring''' for base_num in range(99_99 , 49_99 , -1 ): a = 10_00_02 * base_num if is_9_pandigital(UpperCAmelCase__ ): return candidate for base_num in range(3_33 , 99 , -1 ): a = 1_00_20_03 * base_num if is_9_pandigital(UpperCAmelCase__ ): return candidate return None if __name__ == "__main__": print(F"""{solution() = }""")
32
0
import copy
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    """Configuration for a Conditional DETR model.

    The mangled original assigned every constructor argument to a throwaway
    local ``a`` instead of ``self.<attr>``, so no attribute was ever set and
    the ``num_attention_heads``/``hidden_size`` properties (and the
    ``attribute_map``) would fail; the ``self`` assignments are restored.
    """

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                # Re-hydrate a dict-form backbone config into its config class.
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
700
import os import tempfile import unittest from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter from transformers.testing_utils import slow from transformers.utils import cached_property @unittest.skipUnless(os.path.exists(UpperCAmelCase__ ), '''Tatoeba directory does not exist.''' ) class _lowercase ( unittest.TestCase ): @cached_property def A ( self : List[str] ) -> int: """simple docstring""" a = tempfile.mkdtemp() return TatoebaConverter(save_dir=__lowerCAmelCase ) @slow def A ( self : Optional[int] ) -> List[str]: """simple docstring""" self.resolver.convert_models(["heb-eng"] ) @slow def A ( self : Dict ) -> Any: """simple docstring""" a , a = self.resolver.write_model_card("opus-mt-he-en" , dry_run=__lowerCAmelCase ) assert mmeta["long_pair"] == "heb-eng"
32
0
import torch

from diffusers import DDIMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class DDIMParallelSchedulerTest(SchedulerCommonTest):
    """Tests for DDIMParallelScheduler.

    The mangled original named every method ``A``: each def shadowed the
    previous one and unittest (which discovers ``test_*``) ran none of them.
    Descriptive ``test_*`` names restored; bodies unchanged.
    """

    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        # Run a 10-step deterministic denoising loop and return the sample.
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # Different beta_start so that the first alpha differs from default.
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
701
from ...configuration_utils import PretrainedConfig from ...utils import logging A_ : Any = logging.get_logger(__name__) A_ : Optional[int] = { '''SCUT-DLVCLab/lilt-roberta-en-base''': ( '''https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json''' ), } class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = '''lilt''' def __init__( self : Union[str, Any] , __lowerCAmelCase : Optional[Any]=3_0522 , __lowerCAmelCase : str=768 , __lowerCAmelCase : Any=12 , __lowerCAmelCase : Optional[Any]=12 , __lowerCAmelCase : List[Any]=3072 , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : int=0.0_2 , __lowerCAmelCase : Union[str, Any]=1E-12 , __lowerCAmelCase : Tuple=0 , __lowerCAmelCase : List[Any]="absolute" , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : Dict=1024 , **__lowerCAmelCase : Dict , ) -> int: """simple docstring""" super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase ) a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = hidden_act a = intermediate_size a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = initializer_range a = layer_norm_eps a = position_embedding_type a = classifier_dropout a = channel_shrink_ratio a = max_ad_position_embeddings
32
0
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    """Compare two TensorProtos for equality, ignoring their names.

    The protos are compared with their names temporarily blanked, then the
    names are restored. (The mangled original overwrote a single local twice
    and never touched the protos, so names were not actually ignored.)
    """
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    """Rewrite every input of *node_proto* equal to *name* to *new_name*.

    Recurses into the subgraphs of If/Loop nodes.
    """
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            # Repeated-field protos have no item assignment; insert then pop.
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    """Apply the input rename to every node of *graph_proto*."""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """Drop duplicate initializers and repoint their consumers.

    ind_to_replace holds (duplicate_index, kept_index) pairs with
    duplicate_index > kept_index.
    """
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """Deduplicate identical initializers in an ONNX model file.

    Saves the optimized model next to the input as ``optimized_<name>`` and
    returns that path.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                # Track how many bytes the duplicate would have occupied.
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model_path = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model_path)

    return new_model_path
702
import argparse from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta from transformers.utils import logging logging.set_verbosity_info() def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[int] , UpperCAmelCase__ :List[str] , UpperCAmelCase__ :Any ): '''simple docstring''' a = TaConfig.from_json_file(UpperCAmelCase__ ) print(F"""Building PyTorch model from configuration: {config}""" ) a = TaForConditionalGeneration(UpperCAmelCase__ ) # Load weights from tf checkpoint load_tf_weights_in_ta(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # Save pytorch-model print(F"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(UpperCAmelCase__ ) if __name__ == "__main__": A_ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) A_ : Tuple = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
32
0
import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 A_ : Optional[int] = sys.version_info >= (3, 10) def UpperCAmelCase__ ( UpperCAmelCase__ :Dict=None , UpperCAmelCase__ :Any=None ): '''simple docstring''' return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE__ ) @dataclass class _lowercase : _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 _UpperCAmelCase = 42 @dataclass class _lowercase : _UpperCAmelCase = 42 _UpperCAmelCase = field(default='''toto''', metadata={'''help''': '''help message'''} ) @dataclass class _lowercase : _UpperCAmelCase = False _UpperCAmelCase = True _UpperCAmelCase = None class _lowercase ( _UpperCAmelCase ): _UpperCAmelCase = """titi""" _UpperCAmelCase = """toto""" class _lowercase ( _UpperCAmelCase ): _UpperCAmelCase = """titi""" _UpperCAmelCase = """toto""" _UpperCAmelCase = 42 @dataclass class _lowercase : _UpperCAmelCase = "toto" def A ( self : Optional[int] ) -> Dict: """simple docstring""" a = BasicEnum(self.foo ) @dataclass class _lowercase : _UpperCAmelCase = "toto" def A ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" a = MixedTypeEnum(self.foo ) @dataclass class _lowercase : _UpperCAmelCase = None _UpperCAmelCase = field(default=_UpperCAmelCase, metadata={'''help''': '''help message'''} ) _UpperCAmelCase = None _UpperCAmelCase = list_field(default=[] ) _UpperCAmelCase = list_field(default=[] ) @dataclass class _lowercase : _UpperCAmelCase = list_field(default=[] ) _UpperCAmelCase = list_field(default=[1, 2, 3] ) 
_UpperCAmelCase = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] ) _UpperCAmelCase = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class _lowercase : _UpperCAmelCase = field() _UpperCAmelCase = field() _UpperCAmelCase = field() def A ( self : Any ) -> Optional[Any]: """simple docstring""" a = BasicEnum(self.required_enum ) @dataclass class _lowercase : _UpperCAmelCase = 42 _UpperCAmelCase = field() _UpperCAmelCase = None _UpperCAmelCase = field(default='''toto''', metadata={'''help''': '''help message'''} ) _UpperCAmelCase = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] ) if is_python_no_less_than_3_10: @dataclass class _lowercase : _UpperCAmelCase = False _UpperCAmelCase = True _UpperCAmelCase = None @dataclass class _lowercase : _UpperCAmelCase = None _UpperCAmelCase = field(default=_UpperCAmelCase, metadata={'''help''': '''help message'''} ) _UpperCAmelCase = None _UpperCAmelCase = list_field(default=[] ) _UpperCAmelCase = list_field(default=[] ) class _lowercase ( unittest.TestCase ): def A ( self : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple ) -> Dict: """simple docstring""" self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): a = {k: v for k, v in vars(lowercase__ ).items() if k != """container"""} a = {k: v for k, v in vars(lowercase__ ).items() if k != """container"""} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get("choices" , lowercase__ ) and yy.get("choices" , lowercase__ ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx["type"](lowercase__ ) , yy["type"](lowercase__ ) ) del xx["type"], yy["type"] self.assertEqual(lowercase__ , lowercase__ ) def A ( self : Optional[Any] ) -> Tuple: """simple docstring""" a = HfArgumentParser(lowercase__ ) a = argparse.ArgumentParser() expected.add_argument("--foo" , type=lowercase__ , required=lowercase__ ) 
expected.add_argument("--bar" , type=lowercase__ , required=lowercase__ ) expected.add_argument("--baz" , type=lowercase__ , required=lowercase__ ) expected.add_argument("--flag" , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs="?" ) self.argparsersEqual(lowercase__ , lowercase__ ) a = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""] (a ) = parser.parse_args_into_dataclasses(lowercase__ , look_for_args_file=lowercase__ ) self.assertFalse(example.flag ) def A ( self : Any ) -> Optional[Any]: """simple docstring""" a = HfArgumentParser(lowercase__ ) a = argparse.ArgumentParser() expected.add_argument("--foo" , default=42 , type=lowercase__ ) expected.add_argument("--baz" , default="toto" , type=lowercase__ , help="help message" ) self.argparsersEqual(lowercase__ , lowercase__ ) def A ( self : Tuple ) -> int: """simple docstring""" a = argparse.ArgumentParser() expected.add_argument("--foo" , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs="?" ) expected.add_argument("--baz" , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs="?" 
) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument("--no_baz" , action="store_false" , default=lowercase__ , dest="baz" ) expected.add_argument("--opt" , type=lowercase__ , default=lowercase__ ) a = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(lowercase__ ) for dataclass_type in dataclass_types: a = HfArgumentParser(lowercase__ ) self.argparsersEqual(lowercase__ , lowercase__ ) a = parser.parse_args([] ) self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) ) a = parser.parse_args(["--foo", "--no_baz"] ) self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) ) a = parser.parse_args(["--foo", "--baz"] ) self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) ) a = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] ) self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) ) a = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] ) self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) ) def A ( self : Tuple ) -> List[Any]: """simple docstring""" a = HfArgumentParser(lowercase__ ) a = argparse.ArgumentParser() expected.add_argument( "--foo" , default="toto" , choices=["titi", "toto", 42] , type=make_choice_type_function(["titi", "toto", 42] ) , ) self.argparsersEqual(lowercase__ , lowercase__ ) a = parser.parse_args([] ) self.assertEqual(args.foo , "toto" ) a = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) a = parser.parse_args(["--foo", "titi"] ) self.assertEqual(args.foo , "titi" ) a = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) a = parser.parse_args(["--foo", "42"] ) 
self.assertEqual(args.foo , 42 ) a = parser.parse_args_into_dataclasses(["--foo", "42"] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def A ( self : Any ) -> Union[str, Any]: """simple docstring""" @dataclass class _lowercase : _UpperCAmelCase = "toto" a = HfArgumentParser(lowercase__ ) a = argparse.ArgumentParser() expected.add_argument( "--foo" , default="toto" , choices=("titi", "toto", 42) , type=make_choice_type_function(["titi", "toto", 42] ) , ) self.argparsersEqual(lowercase__ , lowercase__ ) a = parser.parse_args([] ) self.assertEqual(args.foo , "toto" ) a = parser.parse_args(["--foo", "titi"] ) self.assertEqual(args.foo , "titi" ) a = parser.parse_args(["--foo", "42"] ) self.assertEqual(args.foo , 42 ) def A ( self : List[str] ) -> Tuple: """simple docstring""" a = HfArgumentParser(lowercase__ ) a = argparse.ArgumentParser() expected.add_argument("--foo_int" , nargs="+" , default=[] , type=lowercase__ ) expected.add_argument("--bar_int" , nargs="+" , default=[1, 2, 3] , type=lowercase__ ) expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=lowercase__ ) expected.add_argument("--foo_float" , nargs="+" , default=[0.1, 0.2, 0.3] , type=lowercase__ ) self.argparsersEqual(lowercase__ , lowercase__ ) a = parser.parse_args([] ) self.assertEqual( lowercase__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["Hallo", "Bonjour", "Hello"] , foo_float=[0.1, 0.2, 0.3] ) , ) a = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() ) self.assertEqual(lowercase__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["a", "b", "c"] , foo_float=[0.1, 0.7] ) ) def A ( self : Optional[Any] ) -> Any: """simple docstring""" a = argparse.ArgumentParser() expected.add_argument("--foo" , default=lowercase__ , type=lowercase__ ) expected.add_argument("--bar" , default=lowercase__ , type=lowercase__ , help="help message" ) expected.add_argument("--baz" , default=lowercase__ , 
type=lowercase__ ) expected.add_argument("--ces" , nargs="+" , default=[] , type=lowercase__ ) expected.add_argument("--des" , nargs="+" , default=[] , type=lowercase__ ) a = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(lowercase__ ) for dataclass_type in dataclass_types: a = HfArgumentParser(lowercase__ ) self.argparsersEqual(lowercase__ , lowercase__ ) a = parser.parse_args([] ) self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , bar=lowercase__ , baz=lowercase__ , ces=[] , des=[] ) ) a = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() ) self.assertEqual(lowercase__ , Namespace(foo=12 , bar=3.1_4 , baz="42" , ces=["a", "b", "c"] , des=[1, 2, 3] ) ) def A ( self : int ) -> List[str]: """simple docstring""" a = HfArgumentParser(lowercase__ ) a = argparse.ArgumentParser() expected.add_argument("--required_list" , nargs="+" , type=lowercase__ , required=lowercase__ ) expected.add_argument("--required_str" , type=lowercase__ , required=lowercase__ ) expected.add_argument( "--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=lowercase__ , ) self.argparsersEqual(lowercase__ , lowercase__ ) def A ( self : Optional[Any] ) -> Dict: """simple docstring""" a = HfArgumentParser(lowercase__ ) a = argparse.ArgumentParser() expected.add_argument("--foo" , type=lowercase__ , required=lowercase__ ) expected.add_argument( "--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=lowercase__ , ) expected.add_argument("--opt" , type=lowercase__ , default=lowercase__ ) expected.add_argument("--baz" , default="toto" , type=lowercase__ , help="help message" ) expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=lowercase__ ) self.argparsersEqual(lowercase__ , lowercase__ ) def A ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" a = HfArgumentParser(lowercase__ ) 
a = { """foo""": 12, """bar""": 3.1_4, """baz""": """42""", """flag""": True, } a = parser.parse_dict(lowercase__ )[0] a = BasicExample(**lowercase__ ) self.assertEqual(lowercase__ , lowercase__ ) def A ( self : Dict ) -> Optional[int]: """simple docstring""" a = HfArgumentParser(lowercase__ ) a = { """foo""": 12, """bar""": 3.1_4, """baz""": """42""", """flag""": True, """extra""": 42, } self.assertRaises(lowercase__ , parser.parse_dict , lowercase__ , allow_extra_keys=lowercase__ ) def A ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" a = HfArgumentParser(lowercase__ ) a = { """foo""": 12, """bar""": 3.1_4, """baz""": """42""", """flag""": True, } with tempfile.TemporaryDirectory() as tmp_dir: a = os.path.join(lowercase__ , "temp_json" ) os.mkdir(lowercase__ ) with open(temp_local_path + ".json" , "w+" ) as f: json.dump(lowercase__ , lowercase__ ) a = parser.parse_yaml_file(Path(temp_local_path + ".json" ) )[0] a = BasicExample(**lowercase__ ) self.assertEqual(lowercase__ , lowercase__ ) def A ( self : Union[str, Any] ) -> Tuple: """simple docstring""" a = HfArgumentParser(lowercase__ ) a = { """foo""": 12, """bar""": 3.1_4, """baz""": """42""", """flag""": True, } with tempfile.TemporaryDirectory() as tmp_dir: a = os.path.join(lowercase__ , "temp_yaml" ) os.mkdir(lowercase__ ) with open(temp_local_path + ".yaml" , "w+" ) as f: yaml.dump(lowercase__ , lowercase__ ) a = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0] a = BasicExample(**lowercase__ ) self.assertEqual(lowercase__ , lowercase__ ) def A ( self : Tuple ) -> str: """simple docstring""" a = HfArgumentParser(lowercase__ ) self.assertIsNotNone(lowercase__ )
703
def UpperCAmelCase__(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string.

    The result is zero-padded to the width of the longer operand and prefixed
    with "0b", e.g. (25, 32) -> "0b000000".

    Raises:
        ValueError: If either input is negative.
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    # Pad both operands to the same width, then AND them digit by digit.
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
32
0
import copy
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    """Configuration for a Conditional DETR model.

    Holds the encoder/decoder transformer hyperparameters, the backbone
    selection (timm vs. a nested HF backbone config), and the Hungarian
    matcher / loss coefficients used during training.
    """

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        # A timm backbone and an explicit HF backbone config are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                # Re-hydrate a plain dict into the matching backbone config class.
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        # NOTE(review): mirrors the encoder depth, as in the original source.
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        """Alias used by generic model code; maps to the encoder head count."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """Alias used by generic model code; maps to the transformer width."""
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    """ONNX export configuration for Conditional DETR."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes for the pixel inputs and their padding mask.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
704
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    """A single element of the linked list."""

    data: int
    next_node: Node | None


class SortedLinkedList:
    """Singly linked list whose elements are kept in ascending order."""

    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # Prepending the values in descending order leaves the list ascending.
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        """Yield the stored integers in ascending order."""
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """Merge two sorted linked lists into a new sorted linked list."""
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
32
0
from __future__ import annotations

import operator


def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    """Sort ``arr`` with strand sort and return the sorted list.

    Repeatedly pulls an already-ordered "strand" out of the remaining input
    and merges it into the running solution, recursing until the input is
    exhausted. ``arr`` is consumed (emptied) in the process.

    Args:
        arr: Values to sort; mutated (drained) by the algorithm.
        reverse: Sort descending when True.
        solution: Accumulator used by the recursion; callers leave it None.

    Returns:
        The sorted list (ascending, or descending when ``reverse``).
    """
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []  # avoid a shared mutable default

    if not arr:
        return solution

    # Extract one ordered strand; everything that breaks the order is kept
    # for the next recursion instead of being popped mid-iteration.
    sublist = [arr.pop(0)]
    leftover = []
    for item in arr:
        if _operator(item, sublist[-1]):
            sublist.append(item)
        else:
            leftover.append(item)
    arr[:] = leftover

    # Merge the strand into the solution list.
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, existing in enumerate(solution):
                if not _operator(item, existing):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution


if __name__ == "__main__":
    assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
705
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
32
0
def is_ip_va_address_valid(ip_va_address: str) -> bool:
    """Return True if the string is a valid dotted-quad IPv4 address.

    An address is valid when it consists of exactly four numeric octets,
    each in the inclusive range 0..255.
    """
    octets = [int(i) for i in ip_va_address.split(".") if i.isdigit()]
    # Upper bound is 255 (the previous bound of 254 wrongly rejected
    # addresses such as 255.255.255.255).
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_va_address_valid(ip) else "invalid"
    print(f"""{ip} is a {valid_or_invalid} IP v4 address.""")
706
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a FocalNet model.

    Stores the patch-embedding, stage-depth and focal-modulation
    hyperparameters, plus the backbone stage selection handled by
    ``BackboneConfigMixin``.
    """

    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=None,
        depths=None,
        focal_levels=None,
        focal_windows=None,
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # None sentinels replace mutable list defaults; the effective
        # defaults below are unchanged.
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = [192, 384, 768, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [2, 2, 6, 2] if depths is None else depths
        self.focal_levels = [2, 2, 2, 2] if focal_levels is None else focal_levels
        self.focal_windows = [3, 3, 3, 3] if focal_windows is None else focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        # "stem" plus one named stage per depth entry.
        self.stage_names = ["stem"] + [f"""stage{idx}""" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
32
0
from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar A_ : Any = TypeVar('''KEY''') A_ : Any = TypeVar('''VAL''') @dataclass(frozen=UpperCAmelCase_, slots=UpperCAmelCase_ ) class _lowercase ( Generic[KEY, VAL] ): _UpperCAmelCase = 42 _UpperCAmelCase = 42 class _lowercase ( _Item ): def __init__( self : int ) -> Any: """simple docstring""" super().__init__(_snake_case , _snake_case ) def __bool__( self : List[str] ) -> Optional[Any]: """simple docstring""" return False A_ : Optional[Any] = _DeletedItem() class _lowercase ( MutableMapping[KEY, VAL] ): def __init__( self : List[Any] , __lowerCAmelCase : int = 8 , __lowerCAmelCase : float = 0.7_5 ) -> Tuple: """simple docstring""" a = initial_block_size a = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 a = capacity_factor a = 0 def A ( self : Dict , __lowerCAmelCase : KEY ) -> Optional[int]: """simple docstring""" return hash(_snake_case ) % len(self._buckets ) def A ( self : Optional[Any] , __lowerCAmelCase : int ) -> List[str]: """simple docstring""" return (ind + 1) % len(self._buckets ) def A ( self : Any , __lowerCAmelCase : int , __lowerCAmelCase : KEY , __lowerCAmelCase : VAL ) -> List[Any]: """simple docstring""" a = self._buckets[ind] if not stored: a = _Item(_snake_case , _snake_case ) self._len += 1 return True elif stored.key == key: a = _Item(_snake_case , _snake_case ) return True else: return False def A ( self : List[Any] ) -> Optional[Any]: """simple docstring""" a = len(self._buckets ) * self._capacity_factor return len(self ) >= int(_snake_case ) def A ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" if len(self._buckets ) <= self._initial_block_size: return False a = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def A ( self : List[Any] , __lowerCAmelCase : int ) -> Optional[Any]: """simple docstring""" a = self._buckets a = [None] * new_size a = 0 for item in 
old_buckets: if item: self._add_item(item.key , item.val ) def A ( self : Optional[Any] ) -> str: """simple docstring""" self._resize(len(self._buckets ) * 2 ) def A ( self : Dict ) -> str: """simple docstring""" self._resize(len(self._buckets ) // 2 ) def A ( self : str , __lowerCAmelCase : KEY ) -> Tuple: """simple docstring""" a = self._get_bucket_index(_snake_case ) for _ in range(len(self._buckets ) ): yield ind a = self._get_next_ind(_snake_case ) def A ( self : Any , __lowerCAmelCase : KEY , __lowerCAmelCase : VAL ) -> Union[str, Any]: """simple docstring""" for ind in self._iterate_buckets(_snake_case ): if self._try_set(_snake_case , _snake_case , _snake_case ): break def __setitem__( self : Union[str, Any] , __lowerCAmelCase : KEY , __lowerCAmelCase : VAL ) -> Tuple: """simple docstring""" if self._is_full(): self._size_up() self._add_item(_snake_case , _snake_case ) def __delitem__( self : List[str] , __lowerCAmelCase : KEY ) -> Union[str, Any]: """simple docstring""" for ind in self._iterate_buckets(_snake_case ): a = self._buckets[ind] if item is None: raise KeyError(_snake_case ) if item is _deleted: continue if item.key == key: a = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self : List[Any] , __lowerCAmelCase : KEY ) -> int: """simple docstring""" for ind in self._iterate_buckets(_snake_case ): a = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(_snake_case ) def __len__( self : Any ) -> str: """simple docstring""" return self._len def __iter__( self : List[str] ) -> List[str]: """simple docstring""" yield from (item.key for item in self._buckets if item) def __repr__( self : Optional[int] ) -> Dict: """simple docstring""" a = " ,".join( f"""{item.key}: {item.val}""" for item in self._buckets if item ) return f"""HashMap({val_string})"""
707
def is_palindrome(head):
    """Return True if the linked list starting at ``head`` is a palindrome.

    O(1) extra space: reverses the second half in place, so the list is
    mutated. Nodes are expected to expose ``val`` and ``next`` attributes.
    """
    if not head:
        return True
    # Find the middle with fast/slow pointers, splitting the list in two.
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # detach the second half
    # Reverse the second half.
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # Compare both halves; the second has the same length or one node less.
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    """Palindrome check using a stack of the second half's values.

    O(n) extra space, does not mutate the list.
    """
    if not head or not head.next:
        return True
    # 1. Find the midpoint (slow).
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half onto the stack.
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Compare popped values against the list from the front.
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True


def is_palindrome_dict(head):
    """Palindrome check via a value -> positions index.

    For each value, its positions must be symmetric around the list centre,
    and at most one value may appear an odd number of times.
    """
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1  # i and (checksum - i) are mirror positions
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
    if middle > 1:
        return False
    return True
32
0
def UpperCAmelCase__(arr):
    """Return the minimum difference between the sums of two subsets of ``arr``.

    Classic subset-sum DP: dp[i][j] is True when some subset of the first i
    items sums exactly to j. The best subset sum j <= total/2 minimizes
    total - 2*j. Runs in O(n * total) time and space; expects non-negative
    integers. Returns 0 for an empty input.
    """
    n = len(arr)
    s = sum(arr)
    dp = [[False for _ in range(s + 1)] for _ in range(n + 1)]

    # Base cases: sum 0 is always reachable (empty subset); with no items,
    # no positive sum is reachable.
    for i in range(n + 1):
        dp[i][0] = True
    for j in range(1, s + 1):
        dp[0][j] = False

    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]  # skip item i-1
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]

    # Scan downward from total/2 for the largest reachable subset sum;
    # j = 0 is always reachable, so the loop always finds an answer.
    diff = s
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
708
import unittest from transformers import MobileBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) class _lowercase : def __init__( self : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : int=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : str=99 , __lowerCAmelCase : List[str]=64 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Dict=5 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Optional[Any]=37 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Union[str, Any]=2 , __lowerCAmelCase : Optional[Any]=0.0_2 , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : Optional[int]=4 , __lowerCAmelCase : Union[str, Any]=None , ) -> List[str]: """simple docstring""" a = parent a = batch_size a = seq_length a = is_training a = use_input_mask a = use_token_type_ids a = use_labels a = vocab_size a = hidden_size a = embedding_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings 
a = type_vocab_size a = type_sequence_label_size a = initializer_range a = num_labels a = num_choices a = scope def A ( self : Optional[int] ) -> Optional[int]: """simple docstring""" a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a = None if self.use_input_mask: a = random_attention_mask([self.batch_size, self.seq_length] ) a = None if self.use_token_type_ids: a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a = None a = None a = None if self.use_labels: a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a = ids_tensor([self.batch_size] , self.num_choices ) a = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A ( self : int ) -> List[str]: """simple docstring""" return MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , ) def A ( self : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict ) -> Union[str, Any]: """simple docstring""" a = MobileBertModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) a = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) a = model(__lowerCAmelCase ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def A ( self : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Any ) -> str: """simple docstring""" a = MobileBertForMaskedLM(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] ) -> List[str]: """simple docstring""" a = MobileBertForNextSentencePrediction(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def A ( self : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ) -> List[Any]: """simple docstring""" a = MobileBertForPreTraining(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , next_sentence_label=__lowerCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, 
self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def A ( self : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ) -> Any: """simple docstring""" a = MobileBertForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A ( self : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] ) -> Optional[int]: """simple docstring""" a = self.num_labels a = MobileBertForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ) -> Optional[Any]: """simple docstring""" a = self.num_labels a = MobileBertForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , 
(self.batch_size, self.seq_length, self.num_labels) ) def A ( self : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] ) -> List[str]: """simple docstring""" a = self.num_choices a = MobileBertForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A ( self : List[Any] ) -> Dict: """simple docstring""" a = self.prepare_config_and_inputs() ( ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ) = config_and_inputs a = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ): _UpperCAmelCase = ( ( MobileBertModel, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, ) if is_torch_available() else () ) _UpperCAmelCase = ( { '''feature-extraction''': MobileBertModel, '''fill-mask''': MobileBertForMaskedLM, '''question-answering''': MobileBertForQuestionAnswering, '''text-classification''': MobileBertForSequenceClassification, '''token-classification''': MobileBertForTokenClassification, '''zero-shot''': MobileBertForSequenceClassification, } if is_torch_available() else {} ) _UpperCAmelCase = True def A ( self : 
Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any=False ) -> Any: """simple docstring""" a = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) if return_labels: if model_class in get_values(__lowerCAmelCase ): a = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCAmelCase ) a = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase ) return inputs_dict def A ( self : Optional[int] ) -> List[Any]: """simple docstring""" a = MobileBertModelTester(self ) a = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 ) def A ( self : int ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def A ( self : str ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*__lowerCAmelCase ) def A ( self : str ) -> str: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowerCAmelCase ) def A ( self : List[str] ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowerCAmelCase ) def A ( self : int ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowerCAmelCase ) def A ( self : List[Any] ) -> int: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowerCAmelCase ) def A ( self : List[Any] ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowerCAmelCase ) def A ( self : List[Any] ) -> Optional[int]: 
"""simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowerCAmelCase ) def A ( self : int ) -> Tuple: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowerCAmelCase ) def UpperCAmelCase__ ( UpperCAmelCase__ :Dict ): '''simple docstring''' return torch.tensor( UpperCAmelCase__ , dtype=torch.long , device=UpperCAmelCase__ , ) A_ : Dict = 1E-3 @require_torch @require_sentencepiece @require_tokenizers class _lowercase ( unittest.TestCase ): @slow def A ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" a = MobileBertModel.from_pretrained("google/mobilebert-uncased" ).to(__lowerCAmelCase ) a = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] ) with torch.no_grad(): a = model(__lowerCAmelCase )[0] a = torch.Size((1, 9, 512) ) self.assertEqual(output.shape , __lowerCAmelCase ) a = torch.tensor( [ [ [-2.4_73_65_26E07, 8.2_69_16_56E04, 1.6_52_18_38E05], [-5.7_54_17_04E-01, 3.9_05_60_22E00, 4.4_01_15_07E00], [2.6_04_73_59E00, 1.5_67_76_52E00, -1.7_32_41_88E-01], ] ] , device=__lowerCAmelCase , ) # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a # ~1 difference, it's therefore not a good idea to measure using addition. # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE a = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE ) a = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE ) self.assertTrue(lower_bound and upper_bound )
32
0
def UpperCAmelCase__(n, r):
    """Return the binomial coefficient C(n, r) using a rolling Pascal row.

    Keeps a single row ``c`` of length ``r + 1`` and updates it right-to-left
    once per value of ``i`` so each entry uses the previous row's values.
    """
    c = [0 for _ in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row; update right-to-left so
        # c[j - 1] still holds the previous row's value
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(UpperCAmelCase__(n=10, r=5))
709
import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class _lowercase ( UpperCAmelCase__ ): def A ( self : Optional[int] , __lowerCAmelCase : str ) -> Union[str, Any]: """simple docstring""" with open(__lowerCAmelCase , encoding="utf-8" ) as input_file: a = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" ) a = input_file.read() a = regexp.search(__lowerCAmelCase ) return match def A ( self : List[Any] , __lowerCAmelCase : str ) -> Dict: """simple docstring""" with open(__lowerCAmelCase , encoding="utf-8" ) as input_file: a = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL ) a = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` a = regexp.finditer(__lowerCAmelCase ) a = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def A ( self : List[str] ) -> List[Any]: """simple docstring""" a = Path("./datasets" ) a = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(__lowerCAmelCase ) ): raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" ) def A ( self : Tuple ) -> Union[str, Any]: """simple docstring""" a = Path("./datasets" ) a = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_print_statements(str(__lowerCAmelCase ) ): raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
32
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Maps submodule name -> names it exports; consumed lazily by _LazyModule below.
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is missing: simply do not expose the modeling classes
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only; at runtime the lazy module is used.
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy loader so heavy dependencies import on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
710
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Maps submodule name -> names it exports; consumed lazily by _LazyModule below.
_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is missing: simply do not expose the modeling classes
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only; at runtime the lazy module is used.
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy loader so heavy dependencies import on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
32
0
import unittest
from queue import Empty
from threading import Thread

from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers import AutoModelForCausalLM


@require_torch
class _lowercase(unittest.TestCase):
    """Tests that TextStreamer / TextIteratorStreamer reproduce non-streaming generation output."""

    def test_text_streamer_matches_non_streaming(self):
        """Text printed by TextStreamer must equal the plain greedy decode."""
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1  # disable EOS so exactly max_new_tokens are generated

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        """Text yielded by TextIteratorStreamer must equal the plain greedy decode."""
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        """With skip_prompt=True only the newly generated text is streamed."""
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        """Decode kwargs (here skip_special_tokens) must be forwarded to the streamer's decode step."""
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        """A tiny timeout on TextIteratorStreamer raises queue.Empty while iterating."""
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
711
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = (UniPCMultistepScheduler,) _UpperCAmelCase = (('''num_inference_steps''', 25),) def A ( self : List[Any] , **__lowerCAmelCase : Optional[int] ) -> int: """simple docstring""" a = { "num_train_timesteps": 1000, "beta_start": 0.0_0_0_1, "beta_end": 0.0_2, "beta_schedule": "linear", "solver_order": 2, "solver_type": "bh2", } config.update(**__lowerCAmelCase ) return config def A ( self : List[Any] , __lowerCAmelCase : Optional[int]=0 , **__lowerCAmelCase : Optional[Any] ) -> int: """simple docstring""" a = dict(self.forward_default_kwargs ) a = kwargs.pop("num_inference_steps" , __lowerCAmelCase ) a = self.dummy_sample a = 0.1 * sample a = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config(**__lowerCAmelCase ) a = scheduler_class(**__lowerCAmelCase ) scheduler.set_timesteps(__lowerCAmelCase ) # copy over dummy past residuals a = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCAmelCase ) a = scheduler_class.from_pretrained(__lowerCAmelCase ) new_scheduler.set_timesteps(__lowerCAmelCase ) # copy over dummy past residuals a = dummy_past_residuals[: new_scheduler.config.solver_order] a , a = sample, sample for t in range(__lowerCAmelCase , time_step + scheduler.config.solver_order + 1 ): a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample a = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def A ( self : List[Any] , __lowerCAmelCase : 
Optional[Any]=0 , **__lowerCAmelCase : List[Any] ) -> List[str]: """simple docstring""" a = dict(self.forward_default_kwargs ) a = kwargs.pop("num_inference_steps" , __lowerCAmelCase ) a = self.dummy_sample a = 0.1 * sample a = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config() a = scheduler_class(**__lowerCAmelCase ) scheduler.set_timesteps(__lowerCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) a = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCAmelCase ) a = scheduler_class.from_pretrained(__lowerCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__lowerCAmelCase ) # copy over dummy past residual (must be after setting timesteps) a = dummy_past_residuals[: new_scheduler.config.solver_order] a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample a = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def A ( self : str , __lowerCAmelCase : Any=None , **__lowerCAmelCase : List[str] ) -> Any: """simple docstring""" if scheduler is None: a = self.scheduler_classes[0] a = self.get_scheduler_config(**__lowerCAmelCase ) a = scheduler_class(**__lowerCAmelCase ) a = self.scheduler_classes[0] a = self.get_scheduler_config(**__lowerCAmelCase ) a = scheduler_class(**__lowerCAmelCase ) a = 10 a = self.dummy_model() a = self.dummy_sample_deter scheduler.set_timesteps(__lowerCAmelCase ) for i, t in enumerate(scheduler.timesteps ): a = model(__lowerCAmelCase , __lowerCAmelCase ) a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample return sample def A ( self : Any ) -> int: """simple docstring""" a = 
dict(self.forward_default_kwargs ) a = kwargs.pop("num_inference_steps" , __lowerCAmelCase ) for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config() a = scheduler_class(**__lowerCAmelCase ) a = self.dummy_sample a = 0.1 * sample if num_inference_steps is not None and hasattr(__lowerCAmelCase , "set_timesteps" ): scheduler.set_timesteps(__lowerCAmelCase ) elif num_inference_steps is not None and not hasattr(__lowerCAmelCase , "set_timesteps" ): a = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) a = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] a = dummy_past_residuals[: scheduler.config.solver_order] a = scheduler.timesteps[5] a = scheduler.timesteps[6] a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def A ( self : List[str] ) -> Dict: """simple docstring""" a = UniPCMultistepScheduler(**self.get_scheduler_config() ) a = self.full_loop(scheduler=__lowerCAmelCase ) a = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3 a = DPMSolverSinglestepScheduler.from_config(scheduler.config ) a = DEISMultistepScheduler.from_config(scheduler.config ) a = DPMSolverMultistepScheduler.from_config(scheduler.config ) a = UniPCMultistepScheduler.from_config(scheduler.config ) a = self.full_loop(scheduler=__lowerCAmelCase ) a = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3 def A ( self : List[Any] ) -> Dict: """simple docstring""" for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=__lowerCAmelCase ) def A ( self : Optional[Any] ) -> Tuple: """simple docstring""" self.check_over_configs(thresholding=__lowerCAmelCase ) for order 
in [1, 2, 3]: for solver_type in ["bh1", "bh2"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__lowerCAmelCase , prediction_type=__lowerCAmelCase , sample_max_value=__lowerCAmelCase , solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , ) def A ( self : Optional[Any] ) -> Any: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__lowerCAmelCase ) def A ( self : Optional[Any] ) -> Any: """simple docstring""" for solver_type in ["bh1", "bh2"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , prediction_type=__lowerCAmelCase , ) a = self.full_loop( solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , prediction_type=__lowerCAmelCase , ) assert not torch.isnan(__lowerCAmelCase ).any(), "Samples have nan numbers" def A ( self : Optional[int] ) -> Any: """simple docstring""" self.check_over_configs(lower_order_final=__lowerCAmelCase ) self.check_over_configs(lower_order_final=__lowerCAmelCase ) def A ( self : Dict ) -> str: """simple docstring""" for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=__lowerCAmelCase , time_step=0 ) def A ( self : Dict ) -> int: """simple docstring""" a = self.full_loop() a = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3 def A ( self : Optional[int] ) -> int: """simple docstring""" a = self.full_loop(prediction_type="v_prediction" ) a = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.1_0_1_4 ) < 1E-3 def A ( self : Union[str, Any] ) -> str: """simple docstring""" a = self.scheduler_classes[0] a = self.get_scheduler_config(thresholding=__lowerCAmelCase , dynamic_thresholding_ratio=0 ) a = scheduler_class(**__lowerCAmelCase ) a = 10 a = 
self.dummy_model() a = self.dummy_sample_deter.half() scheduler.set_timesteps(__lowerCAmelCase ) for i, t in enumerate(scheduler.timesteps ): a = model(__lowerCAmelCase , __lowerCAmelCase ) a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample assert sample.dtype == torch.floataa def A ( self : List[str] , **__lowerCAmelCase : int ) -> Dict: """simple docstring""" for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config(**__lowerCAmelCase ) a = scheduler_class(**__lowerCAmelCase ) scheduler.set_timesteps(scheduler.config.num_train_timesteps ) assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
32
0
from __future__ import annotations def UpperCAmelCase__ ( UpperCAmelCase__ :list[float] ): '''simple docstring''' a = 0.00 a = 0 for resistor in resistors: if resistor <= 0: a = F"""Resistor at index {index} has a negative or zero value!""" raise ValueError(__snake_case ) first_sum += 1 / float(__snake_case ) index += 1 return 1 / first_sum def UpperCAmelCase__ ( UpperCAmelCase__ :list[float] ): '''simple docstring''' a = 0.00 a = 0 for resistor in resistors: sum_r += resistor if resistor < 0: a = F"""Resistor at index {index} has a negative value!""" raise ValueError(__snake_case ) index += 1 return sum_r if __name__ == "__main__": import doctest doctest.testmod()
712
import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _lowercase : def __init__( self : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int]=13 , __lowerCAmelCase : str=32 , __lowerCAmelCase : str=3 , __lowerCAmelCase : int=4 , __lowerCAmelCase : List[str]=[10, 20, 30, 40] , __lowerCAmelCase : Any=[2, 2, 3, 2] , __lowerCAmelCase : Any=True , __lowerCAmelCase : int=True , __lowerCAmelCase : str=37 , __lowerCAmelCase : List[Any]="gelu" , __lowerCAmelCase : int=10 , __lowerCAmelCase : str=0.0_2 , __lowerCAmelCase : int=["stage2", "stage3", "stage4"] , __lowerCAmelCase : List[str]=[2, 3, 4] , __lowerCAmelCase : str=None , ) -> Optional[Any]: """simple docstring""" a = parent a = batch_size a = image_size a = num_channels a = num_stages a = hidden_sizes a = depths a = is_training a = use_labels a = intermediate_size a = hidden_act a = num_labels a = initializer_range a = out_features a = out_indices a = scope def A ( self : Optional[Any] ) -> int: """simple docstring""" a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a = None if self.use_labels: 
a = ids_tensor([self.batch_size] , self.num_labels ) a = self.get_config() return config, pixel_values, labels def A ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def A ( self : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict ) -> Optional[int]: """simple docstring""" a = ConvNextVaModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def A ( self : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] ) -> Dict: """simple docstring""" a = ConvNextVaForImageClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] ) -> int: """simple docstring""" a = ConvNextVaBackbone(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) 
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None a = None a = ConvNextVaBackbone(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def A ( self : Union[str, Any] ) -> Dict: """simple docstring""" a = self.prepare_config_and_inputs() a , a , a = config_and_inputs a = {"pixel_values": pixel_values} return config, inputs_dict def A ( self : Dict ) -> Optional[int]: """simple docstring""" a = self.prepare_config_and_inputs() a , a , a = config_and_inputs a = {"pixel_values": pixel_values, "labels": labels} return config, inputs_dict @require_torch class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ): _UpperCAmelCase = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) _UpperCAmelCase = ( {'''feature-extraction''': ConvNextVaModel, '''image-classification''': ConvNextVaForImageClassification} if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def A ( self : List[str] ) -> List[Any]: """simple docstring""" a = ConvNextVaModelTester(self ) a = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37 ) def A ( self : Tuple ) -> Dict: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() 
self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A ( self : List[Any] ) -> List[Any]: """simple docstring""" return @unittest.skip(reason="ConvNextV2 does not use inputs_embeds" ) def A ( self : List[Any] ) -> List[Any]: """simple docstring""" pass @unittest.skip(reason="ConvNextV2 does not support input and output embeddings" ) def A ( self : int ) -> List[Any]: """simple docstring""" pass @unittest.skip(reason="ConvNextV2 does not use feedforward chunking" ) def A ( self : Optional[int] ) -> Dict: """simple docstring""" pass def A ( self : List[str] ) -> List[str]: """simple docstring""" if not self.model_tester.is_training: return for model_class in self.all_model_classes: a , a = self.model_tester.prepare_config_and_inputs_with_labels() a = True if model_class.__name__ in [ *get_values(__lowerCAmelCase ), *get_values(__lowerCAmelCase ), ]: continue a = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() a = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) a = model(**__lowerCAmelCase ).loss loss.backward() def A ( self : Optional[int] ) -> List[str]: """simple docstring""" if not self.model_tester.is_training: return for model_class in self.all_model_classes: a , a = self.model_tester.prepare_config_and_inputs_with_labels() a = False a = True if ( model_class.__name__ in [*get_values(__lowerCAmelCase ), *get_values(__lowerCAmelCase )] or not model_class.supports_gradient_checkpointing ): continue a = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.gradient_checkpointing_enable() model.train() a = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) a = model(**__lowerCAmelCase ).loss loss.backward() def A ( self : List[Any] ) -> Any: """simple docstring""" a , a = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = model_class(__lowerCAmelCase ) a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a = [*signature.parameters.keys()] a = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) def A ( self : Dict ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def A ( self : Tuple ) -> List[Any]: """simple docstring""" def check_hidden_states_output(__lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple ): a = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() with torch.no_grad(): a = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) ) a = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states a = self.model_tester.num_stages self.assertEqual(len(__lowerCAmelCase ) , expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) a , a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = True check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] a = True check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def A ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase ) @slow def A ( self : Tuple ) -> List[str]: """simple docstring""" for model_name in 
CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a = ConvNextVaModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def UpperCAmelCase__ ( ): '''simple docstring''' a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _lowercase ( unittest.TestCase ): @cached_property def A ( self : Optional[int] ) -> str: """simple docstring""" return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None @slow def A ( self : List[str] ) -> Union[str, Any]: """simple docstring""" a = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(__lowerCAmelCase ) a = self.default_image_processor a = prepare_img() a = preprocessor(images=__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase ) # forward pass with torch.no_grad(): a = model(**__lowerCAmelCase ) # verify the logits a = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __lowerCAmelCase ) a = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) )
32
0
'''simple docstring''' import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType from ..auto import AutoTokenizer class _lowercase ( __lowercase ): _UpperCAmelCase = ['image_processor', 'tokenizer'] _UpperCAmelCase = 'BlipImageProcessor' _UpperCAmelCase = 'AutoTokenizer' def __init__( self : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Optional[Any]: """simple docstring""" super().__init__(__A , __A ) # add QFormer tokenizer a = qformer_tokenizer def __call__( self : int , __lowerCAmelCase : List[str] = None , __lowerCAmelCase : Any = None , __lowerCAmelCase : Dict = True , __lowerCAmelCase : List[str] = False , __lowerCAmelCase : int = None , __lowerCAmelCase : Optional[Any] = None , __lowerCAmelCase : Optional[Any] = 0 , __lowerCAmelCase : Dict = None , __lowerCAmelCase : List[Any] = None , __lowerCAmelCase : Any = False , __lowerCAmelCase : Optional[int] = False , __lowerCAmelCase : Union[str, Any] = False , __lowerCAmelCase : Optional[int] = False , __lowerCAmelCase : Dict = False , __lowerCAmelCase : int = True , __lowerCAmelCase : Any = None , **__lowerCAmelCase : Union[str, Any] , ) -> BatchFeature: """simple docstring""" if images is None and text is None: raise ValueError("You have to specify at least images or text." 
) a = BatchFeature() if text is not None: a = self.tokenizer( text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_token_type_ids=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , ) encoding.update(__A ) a = self.qformer_tokenizer( text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_token_type_ids=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , ) a = qformer_text_encoding.pop("input_ids" ) a = qformer_text_encoding.pop("attention_mask" ) if images is not None: a = self.image_processor(__A , return_tensors=__A ) encoding.update(__A ) return encoding def A ( self : Dict , *__lowerCAmelCase : Dict , **__lowerCAmelCase : List[Any] ) -> Any: """simple docstring""" return self.tokenizer.batch_decode(*__A , **__A ) def A ( self : List[str] , *__lowerCAmelCase : Tuple , **__lowerCAmelCase : Dict ) -> Tuple: """simple docstring""" return self.tokenizer.decode(*__A , **__A ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def A ( self : Optional[int] ) -> str: """simple docstring""" a = self.tokenizer.model_input_names a = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) def A ( self : List[Any] , __lowerCAmelCase : List[Any] , **__lowerCAmelCase : str ) -> List[str]: """simple docstring""" if os.path.isfile(__A ): raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" ) os.makedirs(__A , exist_ok=__A ) a = os.path.join(__A , "qformer_tokenizer" ) self.qformer_tokenizer.save_pretrained(__A ) 
return super().save_pretrained(__A , **__A ) @classmethod def A ( cls : Dict , __lowerCAmelCase : Tuple , **__lowerCAmelCase : Optional[Any] ) -> int: """simple docstring""" a = AutoTokenizer.from_pretrained(__A , subfolder="qformer_tokenizer" ) a = cls._get_arguments_from_pretrained(__A , **__A ) args.append(__A ) return cls(*__A )
713
import copy
import os

import cva
import numpy as np
from matplotlib import pyplot as plt


# NOTE(review): identifiers in this class are mangled — most assignments bind
# a throwaway local `a` instead of `self.<attr>`, yet later code reads
# `self.img`, `self.sk`, `self.last_list`, `x`, `prk`, `last`, `num`, ... .
# The comments below describe the apparent intent only; the code is not
# runnable as written. TODO: restore the original attribute/variable names.
class _lowercase:
    """Histogram (constant) stretch of a grayscale image.

    Reads an image, builds its grey-level histogram, remaps each pixel through
    a cumulative-distribution lookup table and writes the stretched result to
    ``output_data/output.jpg``.
    """

    def __init__(self: List[str]) -> List[str]:
        """Initialise empty image buffers and histogram bookkeeping."""
        a = ""   # presumably self.img — input image array
        a = ""   # presumably self.original_image — untouched copy for display
        a = []   # presumably self.last_list — per-grey-level remap table
        a = 0    # presumably self.rem — rounding remainder
        a = 256  # presumably self.L — number of grey levels
        a = 0    # presumably self.sk — cumulative probability
        a = 0    # presumably self.k — total pixel count
        a = 0    # presumably self.number_of_rows
        a = 0    # presumably self.number_of_cols

    def A(self: Optional[Any], __lowerCAmelCase: Any) -> int:
        """Stretch the image at the given path and write the result to disk.

        NOTE(review): `int(last % last)` is 0 for any nonzero `last` — this
        looks like a transcription error; confirm against the upstream source
        before relying on the `self.rem` branch.
        """
        a = cva.imread(__lowerCAmelCase, 0)  # load as grayscale
        a = copy.deepcopy(self.img)
        # Histogram over grey levels 0..255; first return value holds bin counts.
        a, a, a = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        a = np.sum(__lowerCAmelCase)
        for i in range(len(__lowerCAmelCase)):
            a = x[i] / self.k  # probability of grey level i
            self.sk += prk     # cumulative distribution so far
            a = (self.L - 1) * self.sk
            if self.rem != 0:
                a = int(last % last)
            # Round to the nearest output grey level.
            a = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(__lowerCAmelCase)
        a = int(np.ma.count(self.img) / self.img[1].size)
        a = self.img[1].size
        # Remap every pixel through the computed lookup table.
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                a = self.img[j][i]
                if num != self.last_list[num]:
                    a = self.last_list[num]
        cva.imwrite("output_data/output.jpg", self.img)

    def A(self: Any) -> int:
        """Plot the histogram of the current image."""
        plt.hist(self.img.ravel(), 256, [0, 256])

    def A(self: Any) -> int:
        """Show input and output images until key press or 5 s timeout."""
        cva.imshow("Output-Image", self.img)
        cva.imshow("Input-Image", self.original_image)
        cva.waitKey(5000)
        cva.destroyAllWindows()


if __name__ == "__main__":
    # NOTE(review): `ConstantStretch`, `stretcher` and `file_path` are not
    # defined above (names mangled to `A_`); this guard raises NameError as-is.
    A_: List[Any] = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''')
    A_: int = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
32
0
import math
import sys


def UpperCAmelCase__(number: int) -> int:
    """Return the least count of perfect squares that sum to *number*.

    Lagrange's four-square theorem guarantees the result is at most 4.
    Bottom-up dynamic programming: ``answers[i]`` is the minimum number of
    squares summing to ``i``. The original block read an undefined mangled
    name where the parameter was meant (NameError); the parameter is restored
    as ``number`` to match the body and error messages.

    Raises:
        ValueError: if *number* is not integral or is negative.
    """
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        # Preserved from the original: 0 is treated as one square (0 = 0**2).
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        # Only squares j**2 <= i can contribute to answers[i].
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
714
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


# NOTE(review): this class is mangled residue — the base class name, the bare
# `42` class attributes and the duplicated `__lowerCAmelCase` parameter names
# are not runnable as written. Comments describe the apparent intent only.
class _lowercase(UpperCAmelCase__):
    """Unconditional image-generation pipeline driven by a UNet score model
    and a score-SDE (variance-exploding) scheduler."""

    # Rebound attribute — presumably originally typed fields for `unet` and
    # `scheduler`; only the last assignment survives.
    _UpperCAmelCase = 42
    _UpperCAmelCase = 42

    def __init__(self: Optional[Any], __lowerCAmelCase: UNetaDModel, __lowerCAmelCase: ScoreSdeVeScheduler) -> str:
        """Register the UNet and the SDE scheduler as pipeline modules."""
        super().__init__()
        self.register_modules(unet=__lowerCAmelCase, scheduler=__lowerCAmelCase)

    @torch.no_grad()
    def __call__(self: int, __lowerCAmelCase: int = 1, __lowerCAmelCase: int = 2000, __lowerCAmelCase: Optional[Union[torch.Generator, List[torch.Generator]]] = None, __lowerCAmelCase: Optional[str] = "pil", __lowerCAmelCase: bool = True, **__lowerCAmelCase: Any) -> Union[ImagePipelineOutput, Tuple]:
        """Sample images by iterating scheduler correction + prediction steps.

        Apparent parameters (names mangled): batch_size, num_inference_steps,
        generator, output_type, return_dict. Returns an ImagePipelineOutput,
        or a 1-tuple of images when return_dict is falsy.
        """
        a = self.unet.config.sample_size
        # Sample tensor is (batch, 3 channels, H, W) at the model's native size.
        a = (batch_size, 3, img_size, img_size)
        a = self.unet
        # Start from Gaussian noise scaled by the scheduler's initial sigma.
        a = randn_tensor(__lowerCAmelCase, generator=__lowerCAmelCase) * self.scheduler.init_noise_sigma
        a = sample.to(self.device)
        self.scheduler.set_timesteps(__lowerCAmelCase)
        self.scheduler.set_sigmas(__lowerCAmelCase)
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            a = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)
            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                a = self.unet(__lowerCAmelCase, __lowerCAmelCase).sample
                a = self.scheduler.step_correct(__lowerCAmelCase, __lowerCAmelCase, generator=__lowerCAmelCase).prev_sample
            # prediction step
            a = model(__lowerCAmelCase, __lowerCAmelCase).sample
            a = self.scheduler.step_pred(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, generator=__lowerCAmelCase)
            a, a = output.prev_sample, output.prev_sample_mean
        # Final image: clamp the mean prediction to [0, 1] and move to NHWC.
        a = sample_mean.clamp(0, 1)
        a = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            a = self.numpy_to_pil(__lowerCAmelCase)
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=__lowerCAmelCase)
32
0
def binomial_coefficient(n: int, r: int) -> int:
    """Compute the binomial coefficient C(n, r).

    Builds one Pascal's-triangle row in place: O(n * r) time, O(r) space.
    The original block declared two parameters with the same mangled name
    (a SyntaxError) while the body read ``n`` and ``r``, and the module-level
    call below referenced ``binomial_coefficient`` which did not exist — the
    module could not even be imported. Both are restored here.

    Returns 0 when r > n (no c[r] update ever reaches that index).
    """
    c = [0 for i in range(r + 1)]  # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


# Backward-compatible alias for the mangled public name.
UpperCAmelCase__ = binomial_coefficient

print(binomial_coefficient(n=10, r=5))
715
# Day-of-month (mod 7) of each month's "doomsday" date, leap vs. common years.
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def UpperCAmelCase__(year: int, month: int, day: int) -> str:
    """Return the week-day name of a Gregorian date via Conway's Doomsday rule.

    Fixes in this revision: the module previously bound all three constants to
    the same mangled name (`A_`) and used un-imported typing annotations, so it
    failed on import; the function read undefined `DOOMSDAY_*`/`WEEK_DAY_NAMES`
    names; and the century-leap test used `(year % 400) == 0` where a century
    year is NON-leap exactly when `year % 400 != 0` (1900 is common, 2000 is
    leap).

    Raises:
        AssertionError: if year is not 3+ digits, or month/day out of range.
    """
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    # Week-day on which all of this year's doomsday dates fall.
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # This month's doomsday date (mod 7); century years are leap only when
    # divisible by 400.
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
32
0
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) A_ : str = logging.get_logger(__name__) A_ : Tuple = OrderedDict( [ ('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''), ('''beit''', '''BeitFeatureExtractor'''), ('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''), ('''clap''', '''ClapFeatureExtractor'''), ('''clip''', '''CLIPFeatureExtractor'''), ('''clipseg''', '''ViTFeatureExtractor'''), ('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''), ('''convnext''', '''ConvNextFeatureExtractor'''), ('''cvt''', '''ConvNextFeatureExtractor'''), ('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''), ('''data2vec-vision''', '''BeitFeatureExtractor'''), ('''deformable_detr''', '''DeformableDetrFeatureExtractor'''), ('''deit''', '''DeiTFeatureExtractor'''), ('''detr''', '''DetrFeatureExtractor'''), ('''dinat''', '''ViTFeatureExtractor'''), ('''donut-swin''', '''DonutFeatureExtractor'''), ('''dpt''', '''DPTFeatureExtractor'''), ('''encodec''', '''EncodecFeatureExtractor'''), ('''flava''', '''FlavaFeatureExtractor'''), ('''glpn''', '''GLPNFeatureExtractor'''), ('''groupvit''', '''CLIPFeatureExtractor'''), ('''hubert''', '''Wav2Vec2FeatureExtractor'''), ('''imagegpt''', '''ImageGPTFeatureExtractor'''), ('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''), ('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''), ('''levit''', '''LevitFeatureExtractor'''), ('''maskformer''', 
'''MaskFormerFeatureExtractor'''), ('''mctct''', '''MCTCTFeatureExtractor'''), ('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''), ('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''), ('''mobilevit''', '''MobileViTFeatureExtractor'''), ('''nat''', '''ViTFeatureExtractor'''), ('''owlvit''', '''OwlViTFeatureExtractor'''), ('''perceiver''', '''PerceiverFeatureExtractor'''), ('''poolformer''', '''PoolFormerFeatureExtractor'''), ('''regnet''', '''ConvNextFeatureExtractor'''), ('''resnet''', '''ConvNextFeatureExtractor'''), ('''segformer''', '''SegformerFeatureExtractor'''), ('''sew''', '''Wav2Vec2FeatureExtractor'''), ('''sew-d''', '''Wav2Vec2FeatureExtractor'''), ('''speech_to_text''', '''Speech2TextFeatureExtractor'''), ('''speecht5''', '''SpeechT5FeatureExtractor'''), ('''swiftformer''', '''ViTFeatureExtractor'''), ('''swin''', '''ViTFeatureExtractor'''), ('''swinv2''', '''ViTFeatureExtractor'''), ('''table-transformer''', '''DetrFeatureExtractor'''), ('''timesformer''', '''VideoMAEFeatureExtractor'''), ('''tvlt''', '''TvltFeatureExtractor'''), ('''unispeech''', '''Wav2Vec2FeatureExtractor'''), ('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''), ('''van''', '''ConvNextFeatureExtractor'''), ('''videomae''', '''VideoMAEFeatureExtractor'''), ('''vilt''', '''ViltFeatureExtractor'''), ('''vit''', '''ViTFeatureExtractor'''), ('''vit_mae''', '''ViTFeatureExtractor'''), ('''vit_msn''', '''ViTFeatureExtractor'''), ('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''), ('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''), ('''wavlm''', '''Wav2Vec2FeatureExtractor'''), ('''whisper''', '''WhisperFeatureExtractor'''), ('''xclip''', '''CLIPFeatureExtractor'''), ('''yolos''', '''YolosFeatureExtractor'''), ] ) A_ : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def UpperCAmelCase__ ( UpperCAmelCase__ :str ): '''simple docstring''' for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in extractors: 
a = model_type_to_module_name(UpperCAmelCase__ ) a = importlib.import_module(F""".{module_name}""" , "transformers.models" ) try: return getattr(UpperCAmelCase__ , UpperCAmelCase__ ) except AttributeError: continue for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(UpperCAmelCase__ , "__name__" , UpperCAmelCase__ ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. a = importlib.import_module("transformers" ) if hasattr(UpperCAmelCase__ , UpperCAmelCase__ ): return getattr(UpperCAmelCase__ , UpperCAmelCase__ ) return None def UpperCAmelCase__ ( UpperCAmelCase__ :Union[str, os.PathLike] , UpperCAmelCase__ :Optional[Union[str, os.PathLike]] = None , UpperCAmelCase__ :bool = False , UpperCAmelCase__ :bool = False , UpperCAmelCase__ :Optional[Dict[str, str]] = None , UpperCAmelCase__ :Optional[Union[bool, str]] = None , UpperCAmelCase__ :Optional[str] = None , UpperCAmelCase__ :bool = False , **UpperCAmelCase__ :Optional[Any] , ): '''simple docstring''' a = get_file_from_repo( UpperCAmelCase__ , UpperCAmelCase__ , cache_dir=UpperCAmelCase__ , force_download=UpperCAmelCase__ , resume_download=UpperCAmelCase__ , proxies=UpperCAmelCase__ , use_auth_token=UpperCAmelCase__ , revision=UpperCAmelCase__ , local_files_only=UpperCAmelCase__ , ) if resolved_config_file is None: logger.info( "Could not locate the feature extractor configuration file, will try to use the model config instead." ) return {} with open(UpperCAmelCase__ , encoding="utf-8" ) as reader: return json.load(UpperCAmelCase__ ) class _lowercase : def __init__( self : Optional[int] ) -> Dict: """simple docstring""" raise EnvironmentError( "AutoFeatureExtractor is designed to be instantiated " "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method." 
) @classmethod @replace_list_option_in_docstrings(__a ) def A ( cls : int , __lowerCAmelCase : List[Any] , **__lowerCAmelCase : List[str] ) -> Union[str, Any]: """simple docstring""" a = kwargs.pop("config" , __a ) a = kwargs.pop("trust_remote_code" , __a ) a = True a = FeatureExtractionMixin.get_feature_extractor_dict(__a , **__a ) a = config_dict.get("feature_extractor_type" , __a ) a = None if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ): a = config_dict['auto_map']['AutoFeatureExtractor'] # If we don't find the feature extractor class in the feature extractor config, let's try the model config. if feature_extractor_class is None and feature_extractor_auto_map is None: if not isinstance(__a , __a ): a = AutoConfig.from_pretrained(__a , **__a ) # It could be in `config.feature_extractor_type`` a = getattr(__a , "feature_extractor_type" , __a ) if hasattr(__a , "auto_map" ) and "AutoFeatureExtractor" in config.auto_map: a = config.auto_map['AutoFeatureExtractor'] if feature_extractor_class is not None: a = feature_extractor_class_from_name(__a ) a = feature_extractor_auto_map is not None a = feature_extractor_class is not None or type(__a ) in FEATURE_EXTRACTOR_MAPPING a = resolve_trust_remote_code( __a , __a , __a , __a ) if has_remote_code and trust_remote_code: a = get_class_from_dynamic_module( __a , __a , **__a ) a = kwargs.pop("code_revision" , __a ) if os.path.isdir(__a ): feature_extractor_class.register_for_auto_class() return feature_extractor_class.from_dict(__a , **__a ) elif feature_extractor_class is not None: return feature_extractor_class.from_dict(__a , **__a ) # Last try: we use the FEATURE_EXTRACTOR_MAPPING. elif type(__a ) in FEATURE_EXTRACTOR_MAPPING: a = FEATURE_EXTRACTOR_MAPPING[type(__a )] return feature_extractor_class.from_dict(__a , **__a ) raise ValueError( f"""Unrecognized feature extractor in {pretrained_model_name_or_path}. 
Should have a """ f"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """ f"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" ) @staticmethod def A ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple ) -> Dict: """simple docstring""" FEATURE_EXTRACTOR_MAPPING.register(__a , __a )
716
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple

import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask

import transformers
from transformers import (
    AutoConfig,
    AutoModelForTokenClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we feed the model for training and eval."""

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    """Fine-tune and evaluate a token-classification model on a CoNLL-style dataset."""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
            " --overwrite_output_dir to overcome."
        )

    # The concrete task (NER, POS, ...) is looked up by name in a local `tasks` module.
    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
            f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}"""
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        # Convert logits to label strings, dropping positions whose gold label is the ignore index.
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info(" %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )
        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)
        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info(" %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs): each spawned process just runs main().
    main()


if __name__ == "__main__":
    main()
32
0
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch

from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class ImageSegmentationTool(PipelineTool):
    """Agent tool producing a binary segmentation mask of an image for a text label."""

    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        # The tool needs PIL to build the output mask image.
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        """Preprocess the (image, label) pair into model-ready tensors."""
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        """Run the CLIPSeg model and return the raw segmentation logits."""
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        """Binarize the logits (positive -> foreground) and return a PIL mask image."""
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
717
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class RwkvConfig(PretrainedConfig):
    """Configuration for RWKV models.

    Stores the hyper-parameters of an RWKV model; defaults match the
    RWKV-4 7B-scale checkpoints.
    """

    model_type = "rwkv"
    # `max_position_embeddings` is exposed as an alias of `context_length`.
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Both derived sizes fall back to functions of hidden_size when not given.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
32
0
import os def UpperCAmelCase__ ( UpperCAmelCase__ :Dict ): '''simple docstring''' a = len(grid[0] ) a = len(_lowerCamelCase ) a = 0 a = 0 a = 0 # Check vertically, horizontally, diagonally at the same time (only works # for nxn grid) for i in range(_lowerCamelCase ): for j in range(n_rows - 3 ): a = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i] a = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3] # Left-to-right diagonal (\) product if i < n_columns - 3: a = ( grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3] ) # Right-to-left diagonal(/) product if i > 2: a = ( grid[i][j] * grid[i - 1][j + 1] * grid[i - 2][j + 2] * grid[i - 3][j + 3] ) a = max( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) if max_product > largest: a = max_product return largest def UpperCAmelCase__ ( ): '''simple docstring''' a = [] with open(os.path.dirname(_lowerCamelCase ) + "/grid.txt" ) as file: for line in file: grid.append(line.strip("\n" ).split(" " ) ) a = [[int(_lowerCamelCase ) for i in grid[j]] for j in range(len(_lowerCamelCase ) )] return largest_product(_lowerCamelCase ) if __name__ == "__main__": print(solution())
718
from math import ceil
from typing import List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    """Extracts log-mel spectrogram patches ("audio values") and an attention mask for TVLT."""

    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        # Number of patches along the frequency axis.
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        """Compute a normalized log-mel spectrogram (values scaled into [-1, 1])."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one or several mono waveforms into padded spectrogram batches.

        Returns a `BatchFeature` with `audio_values` and, if requested, `audio_mask`.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
                    f""" with {self.sampling_rate} and not {sampling_rate}."""
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length]
            for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, 0, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
32
0
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPProcessor(ProcessorMixin):
    """Wraps a CLIP image processor and a CLIP tokenizer into one processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            # Deprecated alias kept for backward compatibility.
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`; merge both into one encoding when given."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwarded to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwarded to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
719
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class _lowercase : def __init__( self : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : int=10 , __lowerCAmelCase : Any=3 , __lowerCAmelCase : Optional[int]=32 * 4 , __lowerCAmelCase : Dict=32 * 6 , __lowerCAmelCase : str=4 , __lowerCAmelCase : Dict=32 , ) -> Any: """simple docstring""" a = parent a = batch_size a = is_training a = use_auxiliary_loss a = num_queries a = num_channels a = min_size a = max_size a = num_labels a = mask_feature_size def A ( self : Union[str, Any] ) -> Dict: """simple docstring""" a = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __lowerCAmelCase ) a = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowerCAmelCase ) a = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowerCAmelCase ) > 0.5 ).float() a = (torch.rand((self.batch_size, self.num_labels) , device=__lowerCAmelCase ) > 0.5).long() a = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def A ( self : str ) -> Any: """simple docstring""" return 
MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def A ( self : Union[str, Any] ) -> Any: """simple docstring""" a , a , a , a , a = self.prepare_config_and_inputs() a = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def A ( self : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ) -> str: """simple docstring""" a = output.encoder_hidden_states a = output.pixel_decoder_hidden_states a = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__lowerCAmelCase ) , config.decoder_config.decoder_layers ) def A ( self : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str]=False ) -> Tuple: """simple docstring""" with torch.no_grad(): a = MaskFormerModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase ) a = model(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if 
output_hidden_states: self.check_output_hidden_state(__lowerCAmelCase , __lowerCAmelCase ) def A ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] ) -> Optional[int]: """simple docstring""" a = MaskFormerForInstanceSegmentation(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() def comm_check_on_output(__lowerCAmelCase : Tuple ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): a = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase ) a = model(__lowerCAmelCase ) comm_check_on_output(__lowerCAmelCase ) a = model( pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ) comm_check_on_output(__lowerCAmelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ): _UpperCAmelCase = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () _UpperCAmelCase = ( {'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase 
= False _UpperCAmelCase = False def A ( self : List[str] ) -> List[Any]: """simple docstring""" a = MaskFormerModelTester(self ) a = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase ) def A ( self : Any ) -> List[str]: """simple docstring""" self.config_tester.run_common_tests() def A ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" a , a = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) def A ( self : int ) -> int: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowerCAmelCase ) @unittest.skip(reason="MaskFormer does not use inputs_embeds" ) def A ( self : List[Any] ) -> Optional[Any]: """simple docstring""" pass @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" ) def A ( self : str ) -> Union[str, Any]: """simple docstring""" pass @unittest.skip(reason="MaskFormer is not a generative model" ) def A ( self : Tuple ) -> Optional[Any]: """simple docstring""" pass @unittest.skip(reason="MaskFormer does not use token embeddings" ) def A ( self : Tuple ) -> Optional[Any]: """simple docstring""" pass @require_torch_multi_gpu @unittest.skip( reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def A ( self : Optional[int] ) -> List[str]: """simple docstring""" pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." 
) def A ( self : List[str] ) -> Any: """simple docstring""" pass def A ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" a , a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = model_class(__lowerCAmelCase ) a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a = [*signature.parameters.keys()] a = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) @slow def A ( self : Tuple ) -> List[Any]: """simple docstring""" for model_name in ["facebook/maskformer-swin-small-coco"]: a = MaskFormerModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def A ( self : str ) -> Dict: """simple docstring""" a = (self.model_tester.min_size,) * 2 a = { "pixel_values": torch.randn((2, 3, *size) , device=__lowerCAmelCase ), "mask_labels": torch.randn((2, 10, *size) , device=__lowerCAmelCase ), "class_labels": torch.zeros(2 , 10 , device=__lowerCAmelCase ).long(), } a = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowerCAmelCase ) a = model(**__lowerCAmelCase ) self.assertTrue(outputs.loss is not None ) def A ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" a , a = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase ) def A ( self : List[str] ) -> Any: """simple docstring""" a , a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = model_class(__lowerCAmelCase ).to(__lowerCAmelCase ) a = model(**__lowerCAmelCase , output_attentions=__lowerCAmelCase ) self.assertTrue(outputs.attentions is not None ) def A ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss a = 
self.all_model_classes[1] a , a , a , a , a = self.model_tester.prepare_config_and_inputs() a = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() a = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ).loss loss.backward() def A ( self : List[str] ) -> Union[str, Any]: """simple docstring""" a = self.all_model_classes[1] a , a , a , a , a = self.model_tester.prepare_config_and_inputs() a = True a = True a = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() a = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ) a = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() a = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't a = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() a = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__lowerCAmelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) A_ : int = 1E-4 def UpperCAmelCase__ ( ): '''simple docstring''' a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_vision @slow class _lowercase ( unittest.TestCase ): @cached_property def A ( self : int ) -> Optional[int]: """simple docstring""" return ( MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" ) if is_vision_available() else None ) def A ( self : List[Any] ) -> Optional[Any]: """simple docstring""" a = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(__lowerCAmelCase ) a = self.default_image_processor a = prepare_img() a = image_processor(__lowerCAmelCase , return_tensors="pt" 
).to(__lowerCAmelCase ) a = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) ) with torch.no_grad(): a = model(**__lowerCAmelCase ) a = torch.tensor( [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) a = torch.tensor( [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) a = torch.tensor( [[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(__lowerCAmelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def A ( self : str ) -> Union[str, Any]: """simple docstring""" a = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" ) .to(__lowerCAmelCase ) .eval() ) a = self.default_image_processor a = prepare_img() a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase ) a = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) ) with torch.no_grad(): a = model(**__lowerCAmelCase ) # masks_queries_logits a = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) a = [ [-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, 
-1.9_3_6_4_2_3_3], [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5], [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2], ] a = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) # class_queries_logits a = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) a = torch.tensor( [ [1.65_12E00, -5.25_72E00, -3.35_19E00], [3.61_69E-02, -5.90_25E00, -2.93_13E00], [1.07_66E-04, -7.76_30E00, -5.12_63E00], ] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def A ( self : List[Any] ) -> Any: """simple docstring""" a = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" ) .to(__lowerCAmelCase ) .eval() ) a = self.default_image_processor a = prepare_img() a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase ) a = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) ) with torch.no_grad(): a = model(**__lowerCAmelCase ) # masks_queries_logits a = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) a = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]] a = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) # class_queries_logits a = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, 
model.config.num_labels + 1) ) a = torch.tensor( [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) ) def A ( self : int ) -> Any: """simple docstring""" a = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" ) .to(__lowerCAmelCase ) .eval() ) a = self.default_image_processor a = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , ) a = inputs["pixel_values"].to(__lowerCAmelCase ) a = [el.to(__lowerCAmelCase ) for el in inputs["mask_labels"]] a = [el.to(__lowerCAmelCase ) for el in inputs["class_labels"]] with torch.no_grad(): a = model(**__lowerCAmelCase ) self.assertTrue(outputs.loss is not None )
32
0
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class _lowercase ( unittest.TestCase ): def A ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" a = tempfile.mkdtemp() # fmt: off a = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on a = dict(zip(_a , range(len(_a ) ) ) ) a = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""] a = {"unk_token": "<unk>"} a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(_a ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(_a ) ) a = { "do_resize": True, "size": 20, "do_center_crop": True, "crop_size": 18, "do_normalize": True, "image_mean": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], "image_std": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], } a = os.path.join(self.tmpdirname , _a ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(_a , _a ) def A ( self : List[str] , **__lowerCAmelCase : Union[str, Any] ) -> List[Any]: """simple docstring""" return CLIPTokenizer.from_pretrained(self.tmpdirname , **_a ) def A ( self : Dict , **__lowerCAmelCase : Union[str, Any] ) -> Dict: """simple docstring""" return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_a ) def A ( self : 
Optional[int] , **__lowerCAmelCase : int ) -> List[Any]: """simple docstring""" return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a ) def A ( self : Tuple ) -> List[Any]: """simple docstring""" shutil.rmtree(self.tmpdirname ) def A ( self : List[str] ) -> str: """simple docstring""" a = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] a = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs] return image_inputs def A ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" a = self.get_tokenizer() a = self.get_rust_tokenizer() a = self.get_image_processor() a = CLIPSegProcessor(tokenizer=_a , image_processor=_a ) processor_slow.save_pretrained(self.tmpdirname ) a = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=_a ) a = CLIPSegProcessor(tokenizer=_a , image_processor=_a ) processor_fast.save_pretrained(self.tmpdirname ) a = CLIPSegProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , _a ) self.assertIsInstance(processor_fast.tokenizer , _a ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , _a ) self.assertIsInstance(processor_fast.image_processor , _a ) def A ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" a = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) a = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) a = self.get_image_processor(do_normalize=_a , padding_value=1.0 ) a = 
CLIPSegProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=_a , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _a ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _a ) def A ( self : List[str] ) -> List[str]: """simple docstring""" a = self.get_image_processor() a = self.get_tokenizer() a = CLIPSegProcessor(tokenizer=_a , image_processor=_a ) a = self.prepare_image_inputs() a = image_processor(_a , return_tensors="np" ) a = processor(images=_a , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def A ( self : Optional[Any] ) -> Tuple: """simple docstring""" a = self.get_image_processor() a = self.get_tokenizer() a = CLIPSegProcessor(tokenizer=_a , image_processor=_a ) a = "lower newer" a = processor(text=_a ) a = tokenizer(_a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def A ( self : Dict ) -> Dict: """simple docstring""" a = self.get_image_processor() a = self.get_tokenizer() a = CLIPSegProcessor(tokenizer=_a , image_processor=_a ) a = "lower newer" a = self.prepare_image_inputs() a = processor(text=_a , images=_a ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(_a ): processor() def A ( self : List[str] ) -> List[Any]: """simple docstring""" a = self.get_image_processor() a = self.get_tokenizer() a = CLIPSegProcessor(tokenizer=_a , image_processor=_a ) a = self.prepare_image_inputs() a = self.prepare_image_inputs() a = processor(images=_a , visual_prompt=_a ) self.assertListEqual(list(inputs.keys() ) , ["pixel_values", 
"conditional_pixel_values"] ) # test if it raises when no input is passed with pytest.raises(_a ): processor() def A ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" a = self.get_image_processor() a = self.get_tokenizer() a = CLIPSegProcessor(tokenizer=_a , image_processor=_a ) a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] a = processor.batch_decode(_a ) a = tokenizer.batch_decode(_a ) self.assertListEqual(_a , _a )
720
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class _lowercase ( unittest.TestCase ): def A ( self : Union[str, Any] ) -> int: """simple docstring""" a = [[1, 2, 4], [1, 2, 3, 4]] a = DisjunctiveConstraint(__lowerCAmelCase ) self.assertTrue(isinstance(dc.token_ids , __lowerCAmelCase ) ) with self.assertRaises(__lowerCAmelCase ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(__lowerCAmelCase ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def A ( self : Tuple ) -> Dict: """simple docstring""" a = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(__lowerCAmelCase ): DisjunctiveConstraint(__lowerCAmelCase ) # fails here def A ( self : int ) -> Any: """simple docstring""" a = [[1, 2, 3], [1, 2, 4]] a = DisjunctiveConstraint(__lowerCAmelCase ) a , a , a = dc.update(1 ) a = stepped is True and completed is False and reset is False self.assertTrue(__lowerCAmelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) a , a , a = dc.update(2 ) a = stepped is True and completed is False and reset is False self.assertTrue(__lowerCAmelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) a , a , a = dc.update(3 ) a = stepped is True and completed is True and reset is False self.assertTrue(__lowerCAmelCase ) self.assertTrue(dc.completed ) # Completed! 
self.assertTrue(dc.current_seq == [1, 2, 3] ) def A ( self : List[Any] ) -> List[Any]: """simple docstring""" a = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] a = DisjunctiveConstraint(__lowerCAmelCase ) a , a , a = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) a , a , a = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) a , a , a = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) a , a , a = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() a , a , a = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) a , a , a = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) a , a , a = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
32
0
import os

# NOTE(review): the obfuscated source named all three functions
# `UpperCAmelCase__`, which made the first two unreachable, and duplicated
# parameter names (a SyntaxError). The names below follow the accelerate
# convention the bodies clearly mirror.


def get_int_from_env(env_keys, default):
    """Return the first non-negative integer found among *env_keys*.

    Each key is looked up in ``os.environ``; a missing key reads as -1 and is
    skipped. Falls back to *default* when no key yields a value >= 0.
    """
    for key in env_keys:
        val = int(os.environ.get(key, -1))
        if val >= 0:
            return val
    return default


def _strtobool(value: str) -> int:
    """Map a truthy/falsy string to 1/0.

    Local replacement for ``distutils.util.strtobool`` — distutils was removed
    in Python 3.12 (PEP 632); semantics are identical.
    """
    value = value.lower()
    if value in ("y", "yes", "t", "true", "on", "1"):
        return 1
    if value in ("n", "no", "f", "false", "off", "0"):
        return 0
    raise ValueError(f"invalid truth value {value!r}")


def parse_flag_from_env(key, default=False):
    """Read environment variable *key* as a boolean flag.

    A missing variable falls back to ``str(default)`` before parsing.
    """
    value = os.environ.get(key, str(default))
    # As its name indicates, `strtobool` actually returns an int...
    return _strtobool(value) == 1


def parse_choice_from_env(key, default="no"):
    """Return the raw string value of environment variable *key*, or *default* if unset."""
    return os.environ.get(key, str(default))
721
from __future__ import annotations


def is_9_pandigital(n: int) -> bool:
    """Return True if *n* uses each of the digits 1-9 exactly once."""
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    """Project Euler 38: largest 1-9 pandigital concatenated product.

    Checks the two possible shapes of the answer in descending order:
    a 4-digit base concatenated with its double (n * 100002), then a
    3-digit base concatenated with its double and triple (n * 1002003).
    Returns None only if no candidate is pandigital (cannot happen here,
    but keeps the function total).
    """
    # 4-digit base: concat(n, 2n) == n * 100002 for 9999 >= n >= 5000
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    # 3-digit base: concat(n, 2n, 3n) == n * 1002003 for 333 >= n >= 100
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
32
0
from __future__ import annotations

import requests

# Post attributes reddit's JSON API exposes; used to validate `wanted_data`.
valid_terms = set(
    '''approved_at_utc approved_by author_flair_background_color author_flair_css_class author_flair_richtext author_flair_template_id author_fullname author_premium can_mod_post category clicked content_categories created_utc downs edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta is_original_content is_reddit_media_domain is_video link_flair_css_class link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title name permalink pwls quarantine saved score secure_media secure_media_embed selftext subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type total_awards_received ups upvote_ratio url user_reports'''.split()
)


def get_subreddit_data(subreddit, limit=1, age="new", wanted_data=None):
    """Fetch `limit` posts from a subreddit's `age` listing via reddit's JSON API.

    Args:
        subreddit: subreddit name, without the "r/" prefix.
        limit: number of posts to fetch.
        age: listing to read ("new", "hot", ...).
        wanted_data: optional list of post attributes to keep; must be a
            subset of `valid_terms`. When empty/None, raw post objects are
            returned.

    Returns:
        Dict mapping post index -> post data (raw object, or a dict of only
        the requested attributes).

    Raises:
        ValueError: if `wanted_data` contains an unknown attribute.
        requests.HTTPError: when reddit responds with 429 (rate limited).
    """
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        # reddit rejects the default python-requests user agent.
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}
    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict


if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try after some time
    print(get_subreddit_data('''learnpython''', wanted_data=['''title''', '''url''', '''selftext''']))
700
import os
import tempfile
import unittest

from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property


# NOTE(review): the obfuscated source named all three methods `A` (so only the
# last survived) and referenced erased names (`__lowerCAmelCase`, `mmeta`).
# Reconstructed below with descriptive names; DEFAULT_REPO is the only
# plausible argument for skipUnless since it is imported but otherwise unused.
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        """Lazily build a TatoebaConverter writing into a fresh temp dir."""
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        """Smoke-test converting a single language pair."""
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        """Dry-run model-card generation and check the pair metadata."""
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
32
0
def UpperCAmelCase__ ( UpperCAmelCase__ :list[list[int | float]] ): '''simple docstring''' a = len(SCREAMING_SNAKE_CASE_ ) a = len(matrix[0] ) a = min(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for row in range(SCREAMING_SNAKE_CASE_ ): # Check if diagonal element is not zero if matrix[row][row] != 0: # Eliminate all the elements below the diagonal for col in range(row + 1 , SCREAMING_SNAKE_CASE_ ): a = matrix[col][row] / matrix[row][row] for i in range(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): matrix[col][i] -= multiplier * matrix[row][i] else: # Find a non-zero diagonal element to swap rows a = True for i in range(row + 1 , SCREAMING_SNAKE_CASE_ ): if matrix[i][row] != 0: a = matrix[i], matrix[row] a = False break if reduce: rank -= 1 for i in range(SCREAMING_SNAKE_CASE_ ): a = matrix[i][rank] # Reduce the row pointer by one to stay on the same row row -= 1 return rank if __name__ == "__main__": import doctest doctest.testmod()
701
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): the obfuscated source bound both module constants to `A_`
# (the map overwrote the logger); restored to the conventional names.
LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}


class LiltConfig(PretrainedConfig):
    """Configuration class for LiLT models.

    Parameter names below are reconstructed from the attribute assignments in
    the (syntactically invalid, duplicate-parameter) obfuscated original;
    defaults are taken verbatim from that source.
    """

    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        # "max_ad_position_embeddings" in the garbled source is presumably the
        # standard "2d"->"ad" mangling — TODO confirm against upstream.
        self.max_2d_position_embeddings = max_2d_position_embeddings
32
0
def prefix_function(input_string: str) -> list[int]:
    """Compute the Knuth–Morris–Pratt prefix function of ``input_string``.

    ``result[i]`` is the length of the longest proper prefix of
    ``input_string[: i + 1]`` that is also a suffix of it.

    >>> prefix_function("aabcdaabc")
    [0, 1, 0, 0, 0, 1, 2, 3, 4]
    """
    prefix_result = [0] * len(input_string)

    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]

        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j

    return prefix_result


def longest_prefix(input_str: str) -> int:
    """Return the largest value of the prefix function of ``input_str``.

    Raises ``ValueError`` for the empty string (``max`` of an empty list),
    matching the original behaviour.

    >>> longest_prefix("aabcdaabc")
    4
    """
    return max(prefix_function(input_str))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
702
import argparse

from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Convert a TensorFlow T5 checkpoint into a PyTorch model directory.

    :param tf_checkpoint_path: path to the TensorFlow checkpoint
    :param config_file: JSON config describing the T5 architecture
    :param pytorch_dump_path: output directory for the PyTorch model
    """
    # Initialise PyTorch model from the architecture description.
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
32
0
import math

import qiskit


def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1):
    """Simulate a quantum full adder for two bits plus a carry-in.

    Each input may be 0, 1, or 2 (2 puts the corresponding qubit into
    superposition via a Hadamard gate).  Returns the measurement counts of
    the (sum, carry_out) qubits over 1000 simulator shots.

    :raises TypeError: if any input is a string
    :raises ValueError: if an input is negative, non-integral, or > 2
    """
    # The previous revision checked ``isinstance(x, x)``, which raises
    # TypeError for every integer input; the intent is to reject strings.
    if isinstance(input_1, str) or isinstance(input_2, str) or isinstance(carry_in, str):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
703
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string.

    The result is zero-padded to the width of the wider operand and prefixed
    with ``"0b"``.

    :raises ValueError: if either input is negative

    >>> binary_and(25, 32)
    '0b000000'
    >>> binary_and(37, 50)
    '0b100000'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
32
0
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    """Reorder a 32-character bit string from big- to little-endian bytes."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Return the low 32 bits of ``i`` as little-endian hexadecimal bytes."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for j in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * j : 2 * j + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Expand ``message`` to an MD5 bit string: data + '1' + zero padding + length."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    # Original length in bits, captured before padding, as a 64-bit field.
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Yield each 512-bit block of ``bit_string`` as sixteen 32-bit integers."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT restricted to 32 bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Add two integers modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate the 32-bit value ``i`` left by ``shift`` bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    # The shifted halves never overlap for i < 2**32, so XOR == OR here.
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the MD5 digest of ``message`` as 32 lowercase hex bytes (RFC 1321).

    >>> md5_me(b"")
    b'd41d8cd98f00b204e9800998ecf8427e'
    """
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)   # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)   # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
704
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    """A singly-linked node holding one integer."""

    data: int
    next_node: Node | None


class SortedLinkedList:
    """An immutable singly-linked list that keeps its elements sorted ascending."""

    def __init__(self, ints: Iterable[int]) -> None:
        # Insert in descending order at the head, so traversal from the head
        # yields ascending order.
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        """Yield the stored integers in ascending order."""
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """Merge two sorted linked lists into a new sorted linked list.

    >>> str(merge_lists(SortedLinkedList([1, 3]), SortedLinkedList([2])))
    '1 -> 2 -> 3'
    """
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
32
0
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"


class BarthezTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) BARThez tokenizer, based on SentencePiece BPE."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # The mask token behaves like a normal word: it absorbs the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # A slow tokenizer can only be re-created if the sentencepiece model is known.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add BARThez special tokens: ``<s> A </s>`` or ``<s> A </s></s> B </s>``."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return all-zero token type ids (BARThez does not use token types)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model into ``save_directory`` and return its path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
705
# Public entry points for the UnCLIP pipelines.  When the required versions
# of torch/transformers are missing, dummy placeholder objects (which raise a
# helpful error on use) are exported under the same names instead.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    # UnCLIP additionally requires transformers >= 4.25.0.
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Dependency missing: export the error-raising placeholders.
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    # Dependencies satisfied: export the real implementations.
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
32
0
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
    "BridgeTower/bridgetower-base-itm-mlm": (
        "https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
    ),
}


class BridgeTowerVisionConfig(PretrainedConfig):
    """Configuration of the BridgeTower vision (ViT-style) encoder."""

    model_type = "bridgetower_vision_model"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_channels=3,
        patch_size=16,
        image_size=288,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        stop_gradient=False,
        share_layernorm=True,
        remove_last_layer=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            # A full BridgeTower config nests this one under "vision_config".
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BridgeTowerTextConfig(PretrainedConfig):
    """Configuration of the BridgeTower text (RoBERTa-style) encoder."""

    model_type = "bridgetower_text_model"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            # A full BridgeTower config nests this one under "text_config".
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BridgeTowerConfig(PretrainedConfig):
    """Top-level BridgeTower configuration combining text and vision sub-configs."""

    model_type = "bridgetower"

    def __init__(
        self,
        share_cross_modal_transformer_layers=True,
        hidden_act="gelu",
        hidden_size=768,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        share_link_tower_layers=False,
        link_tower_type="add",
        num_attention_heads=12,
        num_hidden_layers=6,
        tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False,
        text_config=None,
        vision_config=None,
        **kwargs,
    ):
        # Legacy aliases accepted for backward compatibility.
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")

        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs(
        cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs
    ):
        """Instantiate from separately-built text and vision configurations."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested sub-configs to plain dictionaries."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
706
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a FocalNet model / backbone.

    Defaults correspond to the FocalNet-tiny architecture.  ``None`` is used
    instead of mutable list defaults; the canonical values are filled in below.
    """

    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=None,
        depths=None,
        focal_levels=None,
        focal_windows=None,
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = [192, 384, 768, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [2, 2, 6, 2] if depths is None else depths
        self.focal_levels = [2, 2, 2, 2] if focal_levels is None else focal_levels
        self.focal_windows = [3, 3, 3, 3] if focal_windows is None else focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        # Backbone stage bookkeeping: "stem" plus one name per depth entry.
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
32
0
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV1ImageProcessor


class MobileNetV1ImageProcessingTester(unittest.TestCase):
    """Builds image-processor kwargs and describes the random test inputs."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        # Covered by the per-input-type tests below.
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
707
def is_palindrome(head) -> bool:
    """Check whether a singly-linked list reads the same forwards and backwards.

    O(1) extra space: finds the middle with fast/slow pointers, reverses the
    second half in place, then compares.  Destructive: the input list is cut
    and its second half left reversed.
    """
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head) -> bool:
    """Same check using O(n) extra space: push the second half onto a stack."""
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True


def is_palindrome_dict(head) -> bool:
    """Same check by recording each value's positions and testing symmetry."""
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            # At most one value may appear an odd number of times (the middle).
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                # Positions must mirror around the centre of the list.
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
32
0
"""Tests for `datasets.utils.extract`: per-format extractors, format inference,
and defenses against malicious (path-traversal / symlink) tar archives."""
import os
import zipfile

import pytest

from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard


@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    text_file,
    tmp_path,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        # Fixture is None when the optional compression library is missing.
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
        extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    text_file,
    tmp_path,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
        extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content


@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    """A tar archive whose member path escapes the target via '..'."""
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path


@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    """A tar archive containing a symlink pointing outside the archive."""
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path


@pytest.mark.parametrize(
    "insecure_tar_file, error_log",
    [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")],
)
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    input_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(input_path, output_path)
    # Extraction must refuse the malicious member and log an error instead.
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg


def test_is_zipfile_false_positive(tmpdir):
    # We should have less false positives than `zipfile.is_zipfile`
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
708
import unittest from transformers import MobileBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) class _lowercase : def __init__( self : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : int=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : str=99 , __lowerCAmelCase : List[str]=64 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Dict=5 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Optional[Any]=37 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Union[str, Any]=2 , __lowerCAmelCase : Optional[Any]=0.0_2 , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : Optional[int]=4 , __lowerCAmelCase : Union[str, Any]=None , ) -> List[str]: """simple docstring""" a = parent a = batch_size a = seq_length a = is_training a = use_input_mask a = use_token_type_ids a = use_labels a = vocab_size a = hidden_size a = embedding_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings 
a = type_vocab_size a = type_sequence_label_size a = initializer_range a = num_labels a = num_choices a = scope def A ( self : Optional[int] ) -> Optional[int]: """simple docstring""" a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a = None if self.use_input_mask: a = random_attention_mask([self.batch_size, self.seq_length] ) a = None if self.use_token_type_ids: a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a = None a = None a = None if self.use_labels: a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a = ids_tensor([self.batch_size] , self.num_choices ) a = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A ( self : int ) -> List[str]: """simple docstring""" return MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , ) def A ( self : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict ) -> Union[str, Any]: """simple docstring""" a = MobileBertModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) a = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) a = model(__lowerCAmelCase ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def A ( self : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Any ) -> str: """simple docstring""" a = MobileBertForMaskedLM(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] ) -> List[str]: """simple docstring""" a = MobileBertForNextSentencePrediction(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def A ( self : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ) -> List[Any]: """simple docstring""" a = MobileBertForPreTraining(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , next_sentence_label=__lowerCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, 
self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def A ( self : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ) -> Any: """simple docstring""" a = MobileBertForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A ( self : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] ) -> Optional[int]: """simple docstring""" a = self.num_labels a = MobileBertForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ) -> Optional[Any]: """simple docstring""" a = self.num_labels a = MobileBertForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , 
(self.batch_size, self.seq_length, self.num_labels) ) def A ( self : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] ) -> List[str]: """simple docstring""" a = self.num_choices a = MobileBertForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A ( self : List[Any] ) -> Dict: """simple docstring""" a = self.prepare_config_and_inputs() ( ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ) = config_and_inputs a = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ): _UpperCAmelCase = ( ( MobileBertModel, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, ) if is_torch_available() else () ) _UpperCAmelCase = ( { '''feature-extraction''': MobileBertModel, '''fill-mask''': MobileBertForMaskedLM, '''question-answering''': MobileBertForQuestionAnswering, '''text-classification''': MobileBertForSequenceClassification, '''token-classification''': MobileBertForTokenClassification, '''zero-shot''': MobileBertForSequenceClassification, } if is_torch_available() else {} ) _UpperCAmelCase = True def A ( self : 
Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any=False ) -> Any: """simple docstring""" a = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) if return_labels: if model_class in get_values(__lowerCAmelCase ): a = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCAmelCase ) a = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase ) return inputs_dict def A ( self : Optional[int] ) -> List[Any]: """simple docstring""" a = MobileBertModelTester(self ) a = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 ) def A ( self : int ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def A ( self : str ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*__lowerCAmelCase ) def A ( self : str ) -> str: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowerCAmelCase ) def A ( self : List[str] ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowerCAmelCase ) def A ( self : int ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowerCAmelCase ) def A ( self : List[Any] ) -> int: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowerCAmelCase ) def A ( self : List[Any] ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowerCAmelCase ) def A ( self : List[Any] ) -> Optional[int]: 
"""simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowerCAmelCase ) def A ( self : int ) -> Tuple: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowerCAmelCase ) def UpperCAmelCase__ ( UpperCAmelCase__ :Dict ): '''simple docstring''' return torch.tensor( UpperCAmelCase__ , dtype=torch.long , device=UpperCAmelCase__ , ) A_ : Dict = 1E-3 @require_torch @require_sentencepiece @require_tokenizers class _lowercase ( unittest.TestCase ): @slow def A ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" a = MobileBertModel.from_pretrained("google/mobilebert-uncased" ).to(__lowerCAmelCase ) a = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] ) with torch.no_grad(): a = model(__lowerCAmelCase )[0] a = torch.Size((1, 9, 512) ) self.assertEqual(output.shape , __lowerCAmelCase ) a = torch.tensor( [ [ [-2.4_73_65_26E07, 8.2_69_16_56E04, 1.6_52_18_38E05], [-5.7_54_17_04E-01, 3.9_05_60_22E00, 4.4_01_15_07E00], [2.6_04_73_59E00, 1.5_67_76_52E00, -1.7_32_41_88E-01], ] ] , device=__lowerCAmelCase , ) # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a # ~1 difference, it's therefore not a good idea to measure using addition. # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE a = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE ) a = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE ) self.assertTrue(lower_bound and upper_bound )
32
0
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() A_ : int = logging.get_logger(__name__) A_ : Dict = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[int] , UpperCAmelCase__ :Optional[Any] , UpperCAmelCase__ :Union[str, Any] , UpperCAmelCase__ :Optional[Any] , UpperCAmelCase__ :int ): '''simple docstring''' for attribute in key.split("." ): a = getattr(A_ , A_ ) if weight_type is not None: a = getattr(A_ , A_ ).shape else: a = hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": a = value elif weight_type == "weight_g": a = value elif weight_type == "weight_v": a = value elif weight_type == "bias": a = value else: a = value logger.info(F"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[Any] , UpperCAmelCase__ :Any , UpperCAmelCase__ :Optional[Any] ): '''simple docstring''' a = [] a = fairseq_model.state_dict() a = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): a = False if "conv_layers" in name: load_conv_layer( A_ , A_ , A_ , A_ , hf_model.config.feat_extract_norm == "group" , ) a = True else: for key, mapped_key in MAPPING.items(): a = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key if key in name or (key.split("w2v_model." )[-1] == name.split("." )[0] and not is_finetuned): a = True if "*" in mapped_key: a = name.split(A_ )[0].split("." )[-2] a = mapped_key.replace("*" , A_ ) if "weight_g" in name: a = "weight_g" elif "weight_v" in name: a = "weight_v" elif "weight" in name: a = "weight" elif "bias" in name: a = "bias" else: a = None set_recursively(A_ , A_ , A_ , A_ , A_ ) continue if not is_used: unused_weights.append(A_ ) logger.warning(F"""Unused weights: {unused_weights}""" ) def UpperCAmelCase__ ( UpperCAmelCase__ :Union[str, Any] , UpperCAmelCase__ :int , UpperCAmelCase__ :List[Any] , UpperCAmelCase__ :Dict , UpperCAmelCase__ :str ): '''simple docstring''' a = full_name.split("conv_layers." )[-1] a = name.split("." 
) a = int(items[0] ) a = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) a = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) a = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) a = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) a = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(A_ ) @torch.no_grad() def UpperCAmelCase__ ( UpperCAmelCase__ :List[str] , UpperCAmelCase__ :str , UpperCAmelCase__ :List[str]=None , UpperCAmelCase__ :Optional[int]=None , UpperCAmelCase__ :int=True ): '''simple docstring''' if config_path is not None: a = HubertConfig.from_pretrained(A_ ) else: a = HubertConfig() if is_finetuned: if dict_path: a = Dictionary.load(A_ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq a = target_dict.pad_index a = target_dict.bos_index a = target_dict.eos_index a = len(target_dict.symbols ) a = os.path.join(A_ , "vocab.json" ) if not os.path.isdir(A_ ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(A_ ) ) return os.makedirs(A_ , exist_ok=A_ ) with open(A_ , "w" , encoding="utf-8" ) as vocab_handle: json.dump(target_dict.indices , A_ ) a = WavaVecaCTCTokenizer( A_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=A_ , ) a = True if config.feat_extract_norm == "layer" else False a = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=A_ , return_attention_mask=A_ , ) a = WavaVecaProcessor(feature_extractor=A_ , tokenizer=A_ ) processor.save_pretrained(A_ ) a = HubertForCTC(A_ ) else: a = HubertModel(A_ ) if is_finetuned: a , a , a = fairseq.checkpoint_utils.load_model_ensemble_and_task( 
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: a , a , a = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) a = model[0].eval() recursively_load_weights(A_ , A_ , A_ ) hf_wavavec.save_pretrained(A_ ) if __name__ == "__main__": A_ : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) A_ : Dict = parser.parse_args() convert_hubert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
709
import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class _lowercase ( UpperCAmelCase__ ): def A ( self : Optional[int] , __lowerCAmelCase : str ) -> Union[str, Any]: """simple docstring""" with open(__lowerCAmelCase , encoding="utf-8" ) as input_file: a = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" ) a = input_file.read() a = regexp.search(__lowerCAmelCase ) return match def A ( self : List[Any] , __lowerCAmelCase : str ) -> Dict: """simple docstring""" with open(__lowerCAmelCase , encoding="utf-8" ) as input_file: a = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL ) a = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` a = regexp.finditer(__lowerCAmelCase ) a = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def A ( self : List[str] ) -> List[Any]: """simple docstring""" a = Path("./datasets" ) a = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(__lowerCAmelCase ) ): raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" ) def A ( self : Tuple ) -> Union[str, Any]: """simple docstring""" a = Path("./datasets" ) a = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_print_statements(str(__lowerCAmelCase ) ): raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
32
0
from __future__ import annotations import sys from collections import deque from typing import Generic, TypeVar A_ : Dict = TypeVar('''T''') class _lowercase ( Generic[T] ): _UpperCAmelCase = 42 # Cache store of keys _UpperCAmelCase = 42 # References of the keys in cache _UpperCAmelCase = 10 # Maximum capacity of cache def __init__( self : Optional[int] , __lowerCAmelCase : int ) -> List[str]: """simple docstring""" a = deque() a = set() if not n: a = sys.maxsize elif n < 0: raise ValueError("n should be an integer greater than 0." ) else: a = n def A ( self : Any , __lowerCAmelCase : int ) -> Optional[int]: """simple docstring""" if x not in self.key_reference: if len(self.dq_store ) == LRUCache._MAX_CAPACITY: a = self.dq_store.pop() self.key_reference.remove(_A ) else: self.dq_store.remove(_A ) self.dq_store.appendleft(_A ) self.key_reference.add(_A ) def A ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" for k in self.dq_store: print(_A ) def __repr__( self : int ) -> str: """simple docstring""" return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}""" if __name__ == "__main__": import doctest doctest.testmod() A_ : LRUCache[str | int] = LRUCache(4) lru_cache.refer('''A''') lru_cache.refer(2) lru_cache.refer(3) lru_cache.refer('''A''') lru_cache.refer(4) lru_cache.refer(5) lru_cache.display() print(lru_cache) assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
710
# Lazy-import scaffolding for the `instructblip` subpackage: at runtime the
# module is replaced by a `_LazyModule` proxy so submodules are only imported
# on first attribute access; under TYPE_CHECKING the real imports are visible
# to static checkers.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Torch-independent exports: submodule name -> public names it provides.
# NOTE(review): `Optional`/`List`/`Any`/`Tuple` are used in annotations below
# but never imported here — looks like a transcription artifact; confirm
# against the upstream `__init__.py`.
A_ : Optional[int] = {
    '''configuration_instructblip''': [
        '''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''InstructBlipConfig''',
        '''InstructBlipQFormerConfig''',
        '''InstructBlipVisionConfig''',
    ],
    '''processing_instructblip''': ['''InstructBlipProcessor'''],
}

# The modeling classes require torch; register them only when it is importable.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: simply omit the modeling entries.
    pass
else:
    # NOTE(review): this rebinds `A_` rather than adding a
    # `modeling_instructblip` key to the structure dict above — presumably a
    # transcription artifact; verify intended target.
    A_ : List[Any] = [
        '''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''InstructBlipQFormerModel''',
        '''InstructBlipPreTrainedModel''',
        '''InstructBlipForConditionalGeneration''',
        '''InstructBlipVisionModel''',
    ]

if TYPE_CHECKING:
    # Static analysis path: perform the real imports so names resolve.
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )

else:
    import sys

    # Runtime path: swap this module for the lazy proxy.
    # NOTE(review): `_import_structure` is referenced here but never defined in
    # this file (the structure dict above is bound to `A_`) — confirm the
    # intended name against upstream.
    A_ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
32
0
from torch import nn


def UpperCAmelCase__(act_fn: str) -> nn.Module:
    """Return the ``torch.nn`` activation module named by ``act_fn``.

    Fix vs. the previous revision: the parameter was (mis)named
    ``UpperCAmelCase__`` while the body read ``act_fn``, raising NameError on
    every call.

    Args:
        act_fn: one of ``"swish"``, ``"silu"``, ``"mish"`` or ``"gelu"``.

    Returns:
        The corresponding activation module ("swish" and "silu" both map to
        ``nn.SiLU``).

    Raises:
        ValueError: for any unsupported activation name.
    """
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
711
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = (UniPCMultistepScheduler,) _UpperCAmelCase = (('''num_inference_steps''', 25),) def A ( self : List[Any] , **__lowerCAmelCase : Optional[int] ) -> int: """simple docstring""" a = { "num_train_timesteps": 1000, "beta_start": 0.0_0_0_1, "beta_end": 0.0_2, "beta_schedule": "linear", "solver_order": 2, "solver_type": "bh2", } config.update(**__lowerCAmelCase ) return config def A ( self : List[Any] , __lowerCAmelCase : Optional[int]=0 , **__lowerCAmelCase : Optional[Any] ) -> int: """simple docstring""" a = dict(self.forward_default_kwargs ) a = kwargs.pop("num_inference_steps" , __lowerCAmelCase ) a = self.dummy_sample a = 0.1 * sample a = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config(**__lowerCAmelCase ) a = scheduler_class(**__lowerCAmelCase ) scheduler.set_timesteps(__lowerCAmelCase ) # copy over dummy past residuals a = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCAmelCase ) a = scheduler_class.from_pretrained(__lowerCAmelCase ) new_scheduler.set_timesteps(__lowerCAmelCase ) # copy over dummy past residuals a = dummy_past_residuals[: new_scheduler.config.solver_order] a , a = sample, sample for t in range(__lowerCAmelCase , time_step + scheduler.config.solver_order + 1 ): a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample a = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def A ( self : List[Any] , __lowerCAmelCase : 
Optional[Any]=0 , **__lowerCAmelCase : List[Any] ) -> List[str]: """simple docstring""" a = dict(self.forward_default_kwargs ) a = kwargs.pop("num_inference_steps" , __lowerCAmelCase ) a = self.dummy_sample a = 0.1 * sample a = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config() a = scheduler_class(**__lowerCAmelCase ) scheduler.set_timesteps(__lowerCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) a = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCAmelCase ) a = scheduler_class.from_pretrained(__lowerCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__lowerCAmelCase ) # copy over dummy past residual (must be after setting timesteps) a = dummy_past_residuals[: new_scheduler.config.solver_order] a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample a = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def A ( self : str , __lowerCAmelCase : Any=None , **__lowerCAmelCase : List[str] ) -> Any: """simple docstring""" if scheduler is None: a = self.scheduler_classes[0] a = self.get_scheduler_config(**__lowerCAmelCase ) a = scheduler_class(**__lowerCAmelCase ) a = self.scheduler_classes[0] a = self.get_scheduler_config(**__lowerCAmelCase ) a = scheduler_class(**__lowerCAmelCase ) a = 10 a = self.dummy_model() a = self.dummy_sample_deter scheduler.set_timesteps(__lowerCAmelCase ) for i, t in enumerate(scheduler.timesteps ): a = model(__lowerCAmelCase , __lowerCAmelCase ) a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample return sample def A ( self : Any ) -> int: """simple docstring""" a = 
dict(self.forward_default_kwargs ) a = kwargs.pop("num_inference_steps" , __lowerCAmelCase ) for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config() a = scheduler_class(**__lowerCAmelCase ) a = self.dummy_sample a = 0.1 * sample if num_inference_steps is not None and hasattr(__lowerCAmelCase , "set_timesteps" ): scheduler.set_timesteps(__lowerCAmelCase ) elif num_inference_steps is not None and not hasattr(__lowerCAmelCase , "set_timesteps" ): a = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) a = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] a = dummy_past_residuals[: scheduler.config.solver_order] a = scheduler.timesteps[5] a = scheduler.timesteps[6] a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def A ( self : List[str] ) -> Dict: """simple docstring""" a = UniPCMultistepScheduler(**self.get_scheduler_config() ) a = self.full_loop(scheduler=__lowerCAmelCase ) a = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3 a = DPMSolverSinglestepScheduler.from_config(scheduler.config ) a = DEISMultistepScheduler.from_config(scheduler.config ) a = DPMSolverMultistepScheduler.from_config(scheduler.config ) a = UniPCMultistepScheduler.from_config(scheduler.config ) a = self.full_loop(scheduler=__lowerCAmelCase ) a = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3 def A ( self : List[Any] ) -> Dict: """simple docstring""" for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=__lowerCAmelCase ) def A ( self : Optional[Any] ) -> Tuple: """simple docstring""" self.check_over_configs(thresholding=__lowerCAmelCase ) for order 
in [1, 2, 3]: for solver_type in ["bh1", "bh2"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__lowerCAmelCase , prediction_type=__lowerCAmelCase , sample_max_value=__lowerCAmelCase , solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , ) def A ( self : Optional[Any] ) -> Any: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__lowerCAmelCase ) def A ( self : Optional[Any] ) -> Any: """simple docstring""" for solver_type in ["bh1", "bh2"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , prediction_type=__lowerCAmelCase , ) a = self.full_loop( solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , prediction_type=__lowerCAmelCase , ) assert not torch.isnan(__lowerCAmelCase ).any(), "Samples have nan numbers" def A ( self : Optional[int] ) -> Any: """simple docstring""" self.check_over_configs(lower_order_final=__lowerCAmelCase ) self.check_over_configs(lower_order_final=__lowerCAmelCase ) def A ( self : Dict ) -> str: """simple docstring""" for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=__lowerCAmelCase , time_step=0 ) def A ( self : Dict ) -> int: """simple docstring""" a = self.full_loop() a = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3 def A ( self : Optional[int] ) -> int: """simple docstring""" a = self.full_loop(prediction_type="v_prediction" ) a = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.1_0_1_4 ) < 1E-3 def A ( self : Union[str, Any] ) -> str: """simple docstring""" a = self.scheduler_classes[0] a = self.get_scheduler_config(thresholding=__lowerCAmelCase , dynamic_thresholding_ratio=0 ) a = scheduler_class(**__lowerCAmelCase ) a = 10 a = 
self.dummy_model() a = self.dummy_sample_deter.half() scheduler.set_timesteps(__lowerCAmelCase ) for i, t in enumerate(scheduler.timesteps ): a = model(__lowerCAmelCase , __lowerCAmelCase ) a = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample assert sample.dtype == torch.floataa def A ( self : List[str] , **__lowerCAmelCase : int ) -> Dict: """simple docstring""" for scheduler_class in self.scheduler_classes: a = self.get_scheduler_config(**__lowerCAmelCase ) a = scheduler_class(**__lowerCAmelCase ) scheduler.set_timesteps(scheduler.config.num_train_timesteps ) assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
32
0
import unittest from transformers import DebertaVaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaVaForMaskedLM, DebertaVaForMultipleChoice, DebertaVaForQuestionAnswering, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaModel, ) from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST class _lowercase ( lowercase_ ): def __init__( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int=13 , __lowerCAmelCase : Tuple=7 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : str=99 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Union[str, Any]=5 , __lowerCAmelCase : Tuple=4 , __lowerCAmelCase : List[str]=37 , __lowerCAmelCase : List[str]="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Dict=512 , __lowerCAmelCase : int=16 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : str=0.0_2 , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Optional[Any]="None" , __lowerCAmelCase : Any=3 , __lowerCAmelCase : Union[str, Any]=4 , __lowerCAmelCase : Tuple=None , ) -> Optional[Any]: """simple docstring""" a = parent a = batch_size a = seq_length a = is_training a = use_input_mask a = use_token_type_ids a = use_labels a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = type_sequence_label_size 
a = initializer_range a = num_labels a = num_choices a = relative_attention a = position_biased_input a = pos_att_type a = scope def A ( self : Tuple ) -> Tuple: """simple docstring""" a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a = None if self.use_input_mask: a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) a = None if self.use_token_type_ids: a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a = None a = None a = None if self.use_labels: a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a = ids_tensor([self.batch_size] , self.num_choices ) a = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A ( self : int ) -> Any: """simple docstring""" return DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def A ( self : int , __lowerCAmelCase : Dict ) -> List[str]: """simple docstring""" self.parent.assertListEqual(list(result.loss.size() ) , [] ) def A ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Tuple ) -> List[str]: """simple docstring""" a = DebertaVaModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) 
model.eval() a = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )[0] a = model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ )[0] a = model(lowerCamelCase_ )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def A ( self : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any] ) -> Union[str, Any]: """simple docstring""" a = DebertaVaForMaskedLM(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() a = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict ) -> Any: """simple docstring""" a = self.num_labels a = DebertaVaForSequenceClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() a = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(lowerCamelCase_ ) def A ( self : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple ) -> Any: """simple docstring""" a = self.num_labels a = DebertaVaForTokenClassification(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() a = model(lowerCamelCase_ , 
attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A ( self : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] ) -> List[str]: """simple docstring""" a = DebertaVaForQuestionAnswering(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() a = model( lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ) -> Dict: """simple docstring""" a = DebertaVaForMultipleChoice(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a = model( lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A ( self : Tuple ) -> Optional[int]: """simple docstring""" a = self.prepare_config_and_inputs() ( a ) = config_and_inputs a = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _lowercase ( lowercase_, 
lowercase_, unittest.TestCase ): _UpperCAmelCase = ( ( DebertaVaModel, DebertaVaForMaskedLM, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaForQuestionAnswering, DebertaVaForMultipleChoice, ) if is_torch_available() else () ) _UpperCAmelCase = ( { '''feature-extraction''': DebertaVaModel, '''fill-mask''': DebertaVaForMaskedLM, '''question-answering''': DebertaVaForQuestionAnswering, '''text-classification''': DebertaVaForSequenceClassification, '''token-classification''': DebertaVaForTokenClassification, '''zero-shot''': DebertaVaForSequenceClassification, } if is_torch_available() else {} ) _UpperCAmelCase = True _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def A ( self : Tuple ) -> Optional[Any]: """simple docstring""" a = DebertaVaModelTester(self ) a = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 ) def A ( self : Dict ) -> int: """simple docstring""" self.config_tester.run_common_tests() def A ( self : str ) -> Tuple: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*lowerCamelCase_ ) def A ( self : Any ) -> int: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCamelCase_ ) def A ( self : Tuple ) -> List[str]: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCamelCase_ ) def A ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*lowerCamelCase_ ) def A ( self : str ) -> List[Any]: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*lowerCamelCase_ ) def A ( self : Union[str, Any] ) -> 
Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_multiple_choice(*lowerCamelCase_ ) @slow def A ( self : Tuple ) -> Optional[int]: """simple docstring""" for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a = DebertaVaModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) @require_torch @require_sentencepiece @require_tokenizers class _lowercase ( unittest.TestCase ): @unittest.skip(reason="Model not available yet" ) def A ( self : Dict ) -> Any: """simple docstring""" pass @slow def A ( self : List[str] ) -> List[str]: """simple docstring""" a = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge" ) a = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] ) a = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): a = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )[0] # compare the actual values for a slice. a = torch.tensor( [[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCamelCase_ , atol=1E-4 ) , f"""{output[:, 1:4, 1:4]}""" )
712
import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _lowercase : def __init__( self : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int]=13 , __lowerCAmelCase : str=32 , __lowerCAmelCase : str=3 , __lowerCAmelCase : int=4 , __lowerCAmelCase : List[str]=[10, 20, 30, 40] , __lowerCAmelCase : Any=[2, 2, 3, 2] , __lowerCAmelCase : Any=True , __lowerCAmelCase : int=True , __lowerCAmelCase : str=37 , __lowerCAmelCase : List[Any]="gelu" , __lowerCAmelCase : int=10 , __lowerCAmelCase : str=0.0_2 , __lowerCAmelCase : int=["stage2", "stage3", "stage4"] , __lowerCAmelCase : List[str]=[2, 3, 4] , __lowerCAmelCase : str=None , ) -> Optional[Any]: """simple docstring""" a = parent a = batch_size a = image_size a = num_channels a = num_stages a = hidden_sizes a = depths a = is_training a = use_labels a = intermediate_size a = hidden_act a = num_labels a = initializer_range a = out_features a = out_indices a = scope def A ( self : Optional[Any] ) -> int: """simple docstring""" a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a = None if self.use_labels: 
a = ids_tensor([self.batch_size] , self.num_labels ) a = self.get_config() return config, pixel_values, labels def A ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def A ( self : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict ) -> Optional[int]: """simple docstring""" a = ConvNextVaModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def A ( self : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] ) -> Dict: """simple docstring""" a = ConvNextVaForImageClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] ) -> int: """simple docstring""" a = ConvNextVaBackbone(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) 
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None a = None a = ConvNextVaBackbone(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def A ( self : Union[str, Any] ) -> Dict: """simple docstring""" a = self.prepare_config_and_inputs() a , a , a = config_and_inputs a = {"pixel_values": pixel_values} return config, inputs_dict def A ( self : Dict ) -> Optional[int]: """simple docstring""" a = self.prepare_config_and_inputs() a , a , a = config_and_inputs a = {"pixel_values": pixel_values, "labels": labels} return config, inputs_dict @require_torch class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ): _UpperCAmelCase = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) _UpperCAmelCase = ( {'''feature-extraction''': ConvNextVaModel, '''image-classification''': ConvNextVaForImageClassification} if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def A ( self : List[str] ) -> List[Any]: """simple docstring""" a = ConvNextVaModelTester(self ) a = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37 ) def A ( self : Tuple ) -> Dict: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() 
self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A ( self : List[Any] ) -> List[Any]: """simple docstring""" return @unittest.skip(reason="ConvNextV2 does not use inputs_embeds" ) def A ( self : List[Any] ) -> List[Any]: """simple docstring""" pass @unittest.skip(reason="ConvNextV2 does not support input and output embeddings" ) def A ( self : int ) -> List[Any]: """simple docstring""" pass @unittest.skip(reason="ConvNextV2 does not use feedforward chunking" ) def A ( self : Optional[int] ) -> Dict: """simple docstring""" pass def A ( self : List[str] ) -> List[str]: """simple docstring""" if not self.model_tester.is_training: return for model_class in self.all_model_classes: a , a = self.model_tester.prepare_config_and_inputs_with_labels() a = True if model_class.__name__ in [ *get_values(__lowerCAmelCase ), *get_values(__lowerCAmelCase ), ]: continue a = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() a = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) a = model(**__lowerCAmelCase ).loss loss.backward() def A ( self : Optional[int] ) -> List[str]: """simple docstring""" if not self.model_tester.is_training: return for model_class in self.all_model_classes: a , a = self.model_tester.prepare_config_and_inputs_with_labels() a = False a = True if ( model_class.__name__ in [*get_values(__lowerCAmelCase ), *get_values(__lowerCAmelCase )] or not model_class.supports_gradient_checkpointing ): continue a = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.gradient_checkpointing_enable() model.train() a = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) a = model(**__lowerCAmelCase ).loss loss.backward() def A ( self : List[Any] ) -> Any: """simple docstring""" a , a = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = model_class(__lowerCAmelCase ) a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a = [*signature.parameters.keys()] a = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) def A ( self : Dict ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def A ( self : Tuple ) -> List[Any]: """simple docstring""" def check_hidden_states_output(__lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple ): a = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() with torch.no_grad(): a = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) ) a = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states a = self.model_tester.num_stages self.assertEqual(len(__lowerCAmelCase ) , expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) a , a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = True check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] a = True check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def A ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase ) @slow def A ( self : Tuple ) -> List[str]: """simple docstring""" for model_name in 
CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a = ConvNextVaModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def UpperCAmelCase__ ( ): '''simple docstring''' a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _lowercase ( unittest.TestCase ): @cached_property def A ( self : Optional[int] ) -> str: """simple docstring""" return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None @slow def A ( self : List[str] ) -> Union[str, Any]: """simple docstring""" a = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(__lowerCAmelCase ) a = self.default_image_processor a = prepare_img() a = preprocessor(images=__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase ) # forward pass with torch.no_grad(): a = model(**__lowerCAmelCase ) # verify the logits a = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __lowerCAmelCase ) a = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) )
32
0
'''simple docstring''' import collections import json import os import re from typing import TYPE_CHECKING, List, Optional, Tuple import numpy as np from ...tokenization_utils_fast import PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation A_ : Union[str, Any] = logging.get_logger(__name__) A_ : Union[str, Any] = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''} A_ : str = { '''vocab_file''': { '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''', }, '''emoji_file''': { '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''', }, } A_ : Tuple = { '''abeja/gpt-neox-japanese-2.7b''': 20_48, } def UpperCAmelCase__ ( UpperCAmelCase__ :List[str] , UpperCAmelCase__ :Optional[int] ): '''simple docstring''' with open(A__ , "r" , encoding="utf-8" ) as f: a = json.loads(f.read() ) a = collections.OrderedDict() a = collections.OrderedDict() a = collections.OrderedDict() with open(A__ , "r" , encoding="utf-8" ) as f: a = f.readlines() a = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token] for idx, b in enumerate(A__ ): a = b a = idx for wd in b: a = idx return vocab, raw_vocab, ids_to_tokens, emoji class _lowercase ( __lowerCamelCase ): _UpperCAmelCase = VOCAB_FILES_NAMES _UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase = ['input_ids', 'attention_mask'] def __init__( self : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : str="<|endoftext|>" , __lowerCAmelCase : Dict="<|endoftext|>" , __lowerCAmelCase : List[str]="<|startoftext|>" , __lowerCAmelCase : List[Any]="<|endoftext|>" , __lowerCAmelCase : Optional[Any]=False , **__lowerCAmelCase : Tuple , ) -> str: """simple docstring""" super().__init__( 
unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , do_clean_text=UpperCamelCase_ , **UpperCamelCase_ , ) if not os.path.isfile(UpperCamelCase_ ): raise ValueError( f"""Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained""" " model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) if not os.path.isfile(UpperCamelCase_ ): raise ValueError( f"""Can\'t find a emoji file at path \'{emoji_file}\'. To load the emoji information from a Google""" " pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" ) a = do_clean_text a , a , a , a = load_vocab_and_emoji(UpperCamelCase_ , UpperCamelCase_ ) a = SubWordJapaneseTokenizer( vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji ) @property def A ( self : Tuple ) -> List[Any]: """simple docstring""" return len(self.raw_vocab ) def A ( self : List[Any] ) -> Optional[int]: """simple docstring""" return dict(self.raw_vocab , **self.added_tokens_encoder ) def A ( self : Optional[int] , __lowerCAmelCase : Dict ) -> Any: """simple docstring""" return self.subword_tokenizer.tokenize(UpperCamelCase_ , clean=self.do_clean_text ) def A ( self : str , __lowerCAmelCase : Optional[Any] ) -> str: """simple docstring""" return self.vocab.get(UpperCamelCase_ , self.vocab.get(self.unk_token ) ) def A ( self : List[Any] , __lowerCAmelCase : Any ) -> Dict: """simple docstring""" return self.subword_tokenizer.convert_id_to_token(UpperCamelCase_ ) def A ( self : List[Any] , __lowerCAmelCase : Optional[Any] ) -> str: """simple docstring""" a = "".join(UpperCamelCase_ ).strip() return out_string def A ( self : Optional[Any] , __lowerCAmelCase : "Conversation" ) -> Any: """simple docstring""" a = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) + 
[self.eos_token_id] ) if len(UpperCamelCase_ ) > self.model_max_length: a = input_ids[-self.model_max_length :] return input_ids def A ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> List[Any]: """simple docstring""" a = 0 if os.path.isdir(UpperCamelCase_ ): a = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) a = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] ) else: a = ( (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"] ) a = ( (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"] ) with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as writer: for token_index, token in self.ids_to_tokens.items(): if index != token_index: logger.warning( f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" " Please check that the vocabulary is not corrupted!" 
) a = token_index writer.write(",".join(UpperCamelCase_ ) + "\n" ) index += 1 with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as writer: json.dump(self.emoji , UpperCamelCase_ ) return vocab_file, emoji_file class _lowercase ( __lowerCamelCase ): def __init__( self : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : int ) -> List[Any]: """simple docstring""" a = vocab # same as swe a = ids_to_tokens # same as bpe a = emoji a = np.max([len(UpperCamelCase_ ) for w in self.vocab.keys()] ) a = re.compile(R"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" ) a = re.compile(R"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" ) a = re.compile(R"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" ) a = re.compile( R"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" ) a = re.compile( R"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" ) a = re.compile( R"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" ) a = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿" a = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟" a = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} ) def __len__( self : int ) -> Optional[Any]: """simple docstring""" return len(self.ids_to_tokens ) def A ( self : List[str] , __lowerCAmelCase : List[Any] ) -> Optional[int]: """simple docstring""" a = self.content_repattera.sub("<URL>" , UpperCamelCase_ ) a = self.content_repattera.sub("<EMAIL>" , UpperCamelCase_ ) a = self.content_repattera.sub("<TEL>" , UpperCamelCase_ ) a = 
self.content_repattera.sub("<DATE>" , UpperCamelCase_ ) a = self.content_repattera.sub("<DATE>" , UpperCamelCase_ ) a = self.content_repattera.sub("<PRICE>" , UpperCamelCase_ ) a = content.translate(self.content_transa ) while "<BLOCK><BLOCK>" in content: a = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" ) return content def A ( self : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str=False ) -> List[Any]: """simple docstring""" a = text.replace(" " , "<SP>" ) a = text.replace(" " , "<SP>" ) a = text.replace("\r\n" , "<BR>" ) a = text.replace("\n" , "<BR>" ) a = text.replace("\r" , "<BR>" ) a = text.replace("\t" , "<TAB>" ) a = text.replace("—" , "ー" ) a = text.replace("−" , "ー" ) for k, v in self.emoji["emoji"].items(): if k in text: a = text.replace(UpperCamelCase_ , UpperCamelCase_ ) if clean: a = self.clean_text(UpperCamelCase_ ) def check_simbol(__lowerCAmelCase : int ): a = x.encode() if len(UpperCamelCase_ ) == 1 and len(UpperCamelCase_ ) == 2: a = (int(e[0] ) << 8) + int(e[1] ) if ( (c >= 0Xc_2a1 and c <= 0Xc_2bf) or (c >= 0Xc_780 and c <= 0Xc_783) or (c >= 0Xc_ab9 and c <= 0Xc_bbf) or (c >= 0Xc_c80 and c <= 0Xc_da2) ): return True return False def checkuae(__lowerCAmelCase : Optional[Any] ): a = x.encode() if len(UpperCamelCase_ ) == 1 and len(UpperCamelCase_ ) == 3: a = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] ) if c >= 0Xe28_080 and c <= 0Xe2b_07f: return True return False a = 0 a = [] while pos < len(UpperCamelCase_ ): a = min(len(UpperCamelCase_ ) , pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3 a = [] # (token_id, token, pos) for e in range(UpperCamelCase_ , UpperCamelCase_ , -1 ): a = text[pos:e] if wd in self.vocab: if wd[0] == "<" and len(UpperCamelCase_ ) > 2: a = [(self.vocab[wd], wd, e)] break else: candidates.append((self.vocab[wd], wd, e) ) if len(UpperCamelCase_ ) > 0: # the smallest token_id is adopted a , a , a = sorted(UpperCamelCase_ , key=lambda __lowerCAmelCase : x[0] )[0] 
result.append(UpperCamelCase_ ) a = e else: a = pos + 1 a = text[pos:end] if check_simbol(UpperCamelCase_ ): result.append("<KIGOU>" ) elif checkuae(UpperCamelCase_ ): result.append("<U2000U2BFF>" ) else: for i in wd.encode("utf-8" ): result.append("<|byte%d|>" % i ) a = end return result def A ( self : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any]="\n" ) -> List[str]: """simple docstring""" a = [] a = [] a = self.ids_to_tokens[index][0] if word[:6] == "<|byte" and word[-2:] == "|>": byte_tokens.append(int(word[6:-2] ) ) else: if len(UpperCamelCase_ ) > 0: words.append(bytearray(UpperCamelCase_ ).decode("utf-8" , errors="replace" ) ) a = [] if word[:7] == "<|emoji" and word[-2:] == "|>": words.append(self.emoji["emoji_inv"][word] ) elif word == "<SP>": words.append(" " ) elif word == "<BR>": words.append(UpperCamelCase_ ) elif word == "<TAB>": words.append("\t" ) elif word == "<BLOCK>": words.append("▀" ) elif word == "<KIGOU>": words.append("ǀ" ) elif word == "<U2000U2BFF>": words.append("‖" ) else: words.append(UpperCamelCase_ ) if len(UpperCamelCase_ ) > 0: words.append(bytearray(UpperCamelCase_ ).decode("utf-8" , errors="replace" ) ) a = "".join(UpperCamelCase_ ) return text
713
import copy import os import cva import numpy as np from matplotlib import pyplot as plt class _lowercase : def __init__( self : List[str] ) -> List[str]: """simple docstring""" a = "" a = "" a = [] a = 0 a = 256 a = 0 a = 0 a = 0 a = 0 def A ( self : Optional[Any] , __lowerCAmelCase : Any ) -> int: """simple docstring""" a = cva.imread(__lowerCAmelCase , 0 ) a = copy.deepcopy(self.img ) a , a , a = plt.hist(self.img.ravel() , 256 , [0, 256] , label="x" ) a = np.sum(__lowerCAmelCase ) for i in range(len(__lowerCAmelCase ) ): a = x[i] / self.k self.sk += prk a = (self.L - 1) * self.sk if self.rem != 0: a = int(last % last ) a = int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(__lowerCAmelCase ) a = int(np.ma.count(self.img ) / self.img[1].size ) a = self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): a = self.img[j][i] if num != self.last_list[num]: a = self.last_list[num] cva.imwrite("output_data/output.jpg" , self.img ) def A ( self : Any ) -> int: """simple docstring""" plt.hist(self.img.ravel() , 256 , [0, 256] ) def A ( self : Any ) -> int: """simple docstring""" cva.imshow("Output-Image" , self.img ) cva.imshow("Input-Image" , self.original_image ) cva.waitKey(5000 ) cva.destroyAllWindows() if __name__ == "__main__": A_ : List[Any] = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''') A_ : int = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
32
0
import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer A_ : Optional[int] = logging.get_logger(__name__) class _lowercase ( _UpperCamelCase ): _UpperCAmelCase = '''AutoTokenizer''' _UpperCAmelCase = ['''tokenizer'''] _UpperCAmelCase = { '''semantic_prompt''': 1, '''coarse_prompt''': 2, '''fine_prompt''': 2, } def __init__( self : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict=None ) -> List[str]: """simple docstring""" super().__init__(__lowerCAmelCase ) a = speaker_embeddings @classmethod def A ( cls : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple="speaker_embeddings_path.json" , **__lowerCAmelCase : Dict ) -> Dict: """simple docstring""" if speaker_embeddings_dict_path is not None: a = get_file_from_repo( __lowerCAmelCase , __lowerCAmelCase , subfolder=kwargs.pop("subfolder" , __lowerCAmelCase ) , cache_dir=kwargs.pop("cache_dir" , __lowerCAmelCase ) , force_download=kwargs.pop("force_download" , __lowerCAmelCase ) , proxies=kwargs.pop("proxies" , __lowerCAmelCase ) , resume_download=kwargs.pop("resume_download" , __lowerCAmelCase ) , local_files_only=kwargs.pop("local_files_only" , __lowerCAmelCase ) , use_auth_token=kwargs.pop("use_auth_token" , __lowerCAmelCase ) , revision=kwargs.pop("revision" , __lowerCAmelCase ) , ) if speaker_embeddings_path is None: logger.warning( f"""`{os.path.join(__lowerCAmelCase , __lowerCAmelCase )}` does not exists , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""" ) a = None else: with open(__lowerCAmelCase ) as speaker_embeddings_json: a = json.load(__lowerCAmelCase ) else: a = None a = AutoTokenizer.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) 
return cls(tokenizer=__lowerCAmelCase , speaker_embeddings=__lowerCAmelCase ) def A ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any]="speaker_embeddings_path.json" , __lowerCAmelCase : str="speaker_embeddings" , __lowerCAmelCase : bool = False , **__lowerCAmelCase : Tuple , ) -> Union[str, Any]: """simple docstring""" if self.speaker_embeddings is not None: os.makedirs(os.path.join(__lowerCAmelCase , __lowerCAmelCase , "v2" ) , exist_ok=__lowerCAmelCase ) a = {} a = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": a = self._load_voice_preset(__lowerCAmelCase ) a = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict["repo_or_path"] , __lowerCAmelCase , f"""{prompt_key}_{key}""" ) , voice_preset[key] , allow_pickle=__lowerCAmelCase , ) a = os.path.join(__lowerCAmelCase , f"""{prompt_key}_{key}.npy""" ) a = tmp_dict with open(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , "w" ) as fp: json.dump(__lowerCAmelCase , __lowerCAmelCase ) super().save_pretrained(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) def A ( self : Any , __lowerCAmelCase : str = None , **__lowerCAmelCase : int ) -> Optional[Any]: """simple docstring""" a = self.speaker_embeddings[voice_preset] a = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( f"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""" ) a = get_file_from_repo( self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] , subfolder=kwargs.pop("subfolder" , __lowerCAmelCase ) , cache_dir=kwargs.pop("cache_dir" , __lowerCAmelCase ) , force_download=kwargs.pop("force_download" , __lowerCAmelCase ) , proxies=kwargs.pop("proxies" , __lowerCAmelCase ) , resume_download=kwargs.pop("resume_download" , __lowerCAmelCase ) , local_files_only=kwargs.pop("local_files_only" , __lowerCAmelCase ) , 
use_auth_token=kwargs.pop("use_auth_token" , __lowerCAmelCase ) , revision=kwargs.pop("revision" , __lowerCAmelCase ) , ) if path is None: raise ValueError( f"""`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset} embeddings.""" ) a = np.load(__lowerCAmelCase ) return voice_preset_dict def A ( self : Optional[Any] , __lowerCAmelCase : Optional[dict] = None ) -> List[Any]: """simple docstring""" for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(f"""Voice preset unrecognized, missing {key} as a key.""" ) if not isinstance(voice_preset[key] , np.ndarray ): raise ValueError(f"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" ) if len(voice_preset[key].shape ) != self.preset_shape[key]: raise ValueError(f"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" ) def __call__( self : Any , __lowerCAmelCase : int=None , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Dict="pt" , __lowerCAmelCase : str=256 , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : List[str]=False , **__lowerCAmelCase : List[str] , ) -> Tuple: """simple docstring""" if voice_preset is not None and not isinstance(__lowerCAmelCase , __lowerCAmelCase ): if ( isinstance(__lowerCAmelCase , __lowerCAmelCase ) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): a = self._load_voice_preset(__lowerCAmelCase ) else: if isinstance(__lowerCAmelCase , __lowerCAmelCase ) and not voice_preset.endswith(".npz" ): a = voice_preset + ".npz" a = np.load(__lowerCAmelCase ) if voice_preset is not None: self._validate_voice_preset_dict(__lowerCAmelCase , **__lowerCAmelCase ) a = BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase ) a = self.tokenizer( 
__lowerCAmelCase , return_tensors=__lowerCAmelCase , padding="max_length" , max_length=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , **__lowerCAmelCase , ) if voice_preset is not None: a = voice_preset return encoded_text
714
from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import ScoreSdeVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = 42 _UpperCAmelCase = 42 def __init__( self : Optional[Any] , __lowerCAmelCase : UNetaDModel , __lowerCAmelCase : ScoreSdeVeScheduler ) -> str: """simple docstring""" super().__init__() self.register_modules(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase ) @torch.no_grad() def __call__( self : int , __lowerCAmelCase : int = 1 , __lowerCAmelCase : int = 2000 , __lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCAmelCase : Optional[str] = "pil" , __lowerCAmelCase : bool = True , **__lowerCAmelCase : Any , ) -> Union[ImagePipelineOutput, Tuple]: """simple docstring""" a = self.unet.config.sample_size a = (batch_size, 3, img_size, img_size) a = self.unet a = randn_tensor(__lowerCAmelCase , generator=__lowerCAmelCase ) * self.scheduler.init_noise_sigma a = sample.to(self.device ) self.scheduler.set_timesteps(__lowerCAmelCase ) self.scheduler.set_sigmas(__lowerCAmelCase ) for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): a = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device ) # correction step for _ in range(self.scheduler.config.correct_steps ): a = self.unet(__lowerCAmelCase , __lowerCAmelCase ).sample a = self.scheduler.step_correct(__lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ).prev_sample # prediction step a = model(__lowerCAmelCase , __lowerCAmelCase ).sample a = self.scheduler.step_pred(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ) a , a = output.prev_sample, output.prev_sample_mean a = sample_mean.clamp(0 , 1 ) a = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": a = self.numpy_to_pil(__lowerCAmelCase ) if not 
return_dict: return (sample,) return ImagePipelineOutput(images=__lowerCAmelCase )
32
0
from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging A_ : Optional[int] = logging.get_logger(__name__) A_ : Optional[Any] = { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''', } class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = '''t5''' _UpperCAmelCase = ['''past_key_values'''] _UpperCAmelCase = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self : Any , __lowerCAmelCase : Union[str, Any]=3_2128 , __lowerCAmelCase : Optional[Any]=512 , __lowerCAmelCase : Optional[int]=64 , __lowerCAmelCase : Any=2048 , __lowerCAmelCase : List[Any]=6 , __lowerCAmelCase : str=None , __lowerCAmelCase : List[Any]=8 , __lowerCAmelCase : str=32 , __lowerCAmelCase : Dict=128 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Optional[Any]=1E-6 , __lowerCAmelCase : str=1.0 , __lowerCAmelCase : Optional[int]="relu" , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : str=True , __lowerCAmelCase : int=0 , __lowerCAmelCase : Optional[Any]=1 , **__lowerCAmelCase : Union[str, Any] , ) -> List[str]: """simple docstring""" a = vocab_size a = d_model a = d_kv a = d_ff a = num_layers a = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry a = num_heads a = relative_attention_num_buckets a = relative_attention_max_distance a = dropout_rate a = layer_norm_epsilon a = initializer_factor a = feed_forward_proj a = use_cache a = self.feed_forward_proj.split("-" ) a = act_info[-1] a = act_info[0] == "gated" if len(lowercase_ ) > 
1 and act_info[0] != "gated" or len(lowercase_ ) > 2: raise ValueError( f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. " "'gated-gelu' or 'relu'" ) # for backwards compatibility if feed_forward_proj == "gated-gelu": a = "gelu_new" super().__init__( pad_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , **lowercase_ , ) class _lowercase ( UpperCAmelCase__ ): @property def A ( self : int ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" a = { "input_ids": {0: "batch", 1: "encoder_sequence"}, "attention_mask": {0: "batch", 1: "encoder_sequence"}, } if self.use_past: a = "past_encoder_sequence + sequence" a = {0: "batch"} a = {0: "batch", 1: "past_decoder_sequence + sequence"} else: a = {0: "batch", 1: "decoder_sequence"} a = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(lowercase_ , direction="inputs" ) return common_inputs @property def A ( self : Union[str, Any] ) -> int: """simple docstring""" return 13
715
A_ : Any = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] A_ : Tuple = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] A_ : Optional[int] = { 0: '''Sunday''', 1: '''Monday''', 2: '''Tuesday''', 3: '''Wednesday''', 4: '''Thursday''', 5: '''Friday''', 6: '''Saturday''', } def UpperCAmelCase__ ( UpperCAmelCase__ :int , UpperCAmelCase__ :int , UpperCAmelCase__ :int ): '''simple docstring''' assert len(str(UpperCAmelCase__ ) ) > 2, "year should be in YYYY format" assert 1 <= month <= 12, "month should be between 1 to 12" assert 1 <= day <= 31, "day should be between 1 to 31" # Doomsday algorithm: a = year // 1_00 a = (5 * (century % 4) + 2) % 7 a = year % 1_00 a = centurian % 12 a = ( (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor ) % 7 a = ( DOOMSDAY_NOT_LEAP[month - 1] if (year % 4 != 0) or (centurian == 0 and (year % 4_00) == 0) else DOOMSDAY_LEAP[month - 1] ) a = (dooms_day + day - day_anchor) % 7 return WEEK_DAY_NAMES[week_day] if __name__ == "__main__": import doctest doctest.testmod()
32
0
import tempfile import unittest import numpy as np from diffusers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionPipeline, PNDMScheduler, ) from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _lowercase ( __lowerCAmelCase, unittest.TestCase ): _UpperCAmelCase = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def A ( self : int , __lowerCAmelCase : List[Any]=0 ) -> Optional[Any]: """simple docstring""" a = np.random.RandomState(lowerCAmelCase_ ) a = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def A ( self : Any ) -> Optional[Any]: """simple docstring""" a = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) a = self.get_dummy_inputs() a = pipe(**lowerCAmelCase_ ).images a = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) a = np.array([0.6_5_0_7_2, 0.5_8_4_9_2, 0.4_8_2_1_9, 0.5_5_5_2_1, 0.5_3_1_8_0, 0.5_5_9_3_9, 0.5_0_6_9_7, 0.3_9_8_0_0, 0.4_6_4_5_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def A ( self : Optional[Any] ) -> str: """simple docstring""" a = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) a = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowerCAmelCase_ ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) a = self.get_dummy_inputs() a = pipe(**lowerCAmelCase_ ).images a = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) a = np.array([0.6_5_8_6_3, 0.5_9_4_2_5, 0.4_9_3_2_6, 0.5_6_3_1_3, 0.5_3_8_7_5, 
0.5_6_6_2_7, 0.5_1_0_6_5, 0.3_9_7_7_7, 0.4_6_3_3_0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def A ( self : int ) -> Union[str, Any]: """simple docstring""" a = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) a = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) a = self.get_dummy_inputs() a = pipe(**lowerCAmelCase_ ).images a = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) a = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def A ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" a = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) a = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) a = self.get_dummy_inputs() a = pipe(**lowerCAmelCase_ ).images a = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) a = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def A ( self : Tuple ) -> List[str]: """simple docstring""" a = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) a = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) a = self.get_dummy_inputs() a = pipe(**lowerCAmelCase_ ).images a = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) a = np.array([0.5_3_8_1_7, 0.6_0_8_1_2, 0.4_7_3_8_4, 0.4_9_5_3_0, 0.5_1_8_9_4, 0.4_9_8_1_4, 0.4_7_9_8_4, 0.3_8_9_5_8, 0.4_4_2_7_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def A ( self : 
int ) -> Tuple: """simple docstring""" a = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) a = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) a = self.get_dummy_inputs() a = pipe(**lowerCAmelCase_ ).images a = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) a = np.array([0.5_3_8_9_5, 0.6_0_8_0_8, 0.4_7_9_3_3, 0.4_9_6_0_8, 0.5_1_8_8_6, 0.4_9_9_5_0, 0.4_8_0_5_3, 0.3_8_9_5_7, 0.4_4_2_0_0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def A ( self : Optional[int] ) -> Optional[int]: """simple docstring""" a = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) a = self.get_dummy_inputs() a = 3 * [inputs["prompt"]] # forward a = pipe(**lowerCAmelCase_ ) a = output.images[0, -3:, -3:, -1] a = self.get_dummy_inputs() a = 3 * [inputs.pop("prompt" )] a = pipe.tokenizer( lowerCAmelCase_ , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=lowerCAmelCase_ , return_tensors="np" , ) a = text_inputs["input_ids"] a = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] a = prompt_embeds # forward a = pipe(**lowerCAmelCase_ ) a = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 def A ( self : Optional[int] ) -> Optional[int]: """simple docstring""" a = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) a = self.get_dummy_inputs() a = 3 * ["this is a negative prompt"] a = negative_prompt a = 3 * [inputs["prompt"]] # forward a = pipe(**lowerCAmelCase_ ) a = output.images[0, -3:, -3:, -1] a = self.get_dummy_inputs() a = 3 * [inputs.pop("prompt" )] a = [] for p in [prompt, negative_prompt]: a = pipe.tokenizer( lowerCAmelCase_ 
, padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=lowerCAmelCase_ , return_tensors="np" , ) a = text_inputs["input_ids"] embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] ) a , a = embeds # forward a = pipe(**lowerCAmelCase_ ) a = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 @nightly @require_onnxruntime @require_torch_gpu class _lowercase ( unittest.TestCase ): @property def A ( self : Union[str, Any] ) -> int: """simple docstring""" return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def A ( self : Union[str, Any] ) -> List[str]: """simple docstring""" a = ort.SessionOptions() a = False return options def A ( self : Any ) -> str: """simple docstring""" a = OnnxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) a = "A painting of a squirrel eating a burger" np.random.seed(0 ) a = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="np" ) a = output.images a = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) a = np.array([0.0_4_5_2, 0.0_3_9_0, 0.0_0_8_7, 0.0_3_5_0, 0.0_6_1_7, 0.0_3_6_4, 0.0_5_4_4, 0.0_5_2_3, 0.0_7_2_0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def A ( self : int ) -> Optional[int]: """simple docstring""" a = DDIMScheduler.from_pretrained( "runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" ) a = OnnxStableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , provider=self.gpu_provider , 
sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) a = "open neural network exchange" a = np.random.RandomState(0 ) a = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCAmelCase_ , output_type="np" ) a = output.images a = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) a = np.array([0.2_8_6_7, 0.1_9_7_4, 0.1_4_8_1, 0.7_2_9_4, 0.7_2_5_1, 0.6_6_6_7, 0.4_1_9_4, 0.5_6_4_2, 0.6_4_8_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def A ( self : int ) -> Tuple: """simple docstring""" a = LMSDiscreteScheduler.from_pretrained( "runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" ) a = OnnxStableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) a = "open neural network exchange" a = np.random.RandomState(0 ) a = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCAmelCase_ , output_type="np" ) a = output.images a = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) a = np.array([0.2_3_0_6, 0.1_9_5_9, 0.1_5_9_3, 0.6_5_4_9, 0.6_3_9_4, 0.5_4_0_8, 0.5_0_6_5, 0.6_0_1_0, 0.6_1_6_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def A ( self : str ) -> int: """simple docstring""" a = 0 def test_callback_fn(__lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any ) -> None: a = True nonlocal number_of_steps number_of_steps += 1 if step == 0: assert latents.shape == (1, 4, 64, 64) a = latents[0, -3:, -3:, -1] a = np.array( [-0.6_7_7_2, -0.3_8_3_5, -1.2_4_5_6, 0.1_9_0_5, -1.0_9_7_4, 0.6_9_6_7, -1.9_3_5_3, 0.0_1_7_8, 1.0_1_6_7] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3 elif step == 5: 
assert latents.shape == (1, 4, 64, 64) a = latents[0, -3:, -3:, -1] a = np.array( [-0.3_3_5_1, 0.2_2_4_1, -0.1_8_3_7, -0.2_3_2_5, -0.6_5_7_7, 0.3_3_9_3, -0.0_2_4_1, 0.5_8_9_9, 1.3_8_7_5] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3 a = False a = OnnxStableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , revision="onnx" , safety_checker=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) a = "Andromeda galaxy in a bottle" a = np.random.RandomState(0 ) pipe( prompt=lowerCAmelCase_ , num_inference_steps=5 , guidance_scale=7.5 , generator=lowerCAmelCase_ , callback=lowerCAmelCase_ , callback_steps=1 , ) assert test_callback_fn.has_been_called assert number_of_steps == 6 def A ( self : Dict ) -> Optional[Any]: """simple docstring""" a = OnnxStableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , revision="onnx" , safety_checker=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) assert pipe.safety_checker is None a = pipe("example prompt" , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowerCAmelCase_ ) a = OnnxStableDiffusionPipeline.from_pretrained(lowerCAmelCase_ ) # sanity check that the pipeline still works assert pipe.safety_checker is None a = pipe("example prompt" , num_inference_steps=2 ).images[0] assert image is not None
716
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple

import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask

import transformers
from transformers import (
    AutoConfig,
    AutoModelForTokenClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process

logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to the data used for training and evaluation."""

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    """Fine-tune / evaluate a token-classification model on a CoNLL-style dataset."""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # The concrete task class (e.g. NER, POS) is looked up by name in the local `tasks` module.
    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer.
    # The .from_pretrained methods guarantee that only one local process can
    # concurrently download model & vocab in a distributed setting.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        # Drop positions labelled with the CrossEntropyLoss ignore index (padding /
        # sub-word continuation tokens) and map ids back to label strings.
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator: pad to multiples of 8 so fp16 tensor cores are used efficiently.
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info(" %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )
        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)
        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info(" %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results


def _mp_fn(index):
    # Entry point for xla_spawn (TPU multi-processing): each process re-enters main().
    main()


if __name__ == "__main__":
    main()
32
0
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence):
    """Sort a sequence containing only the values in ``colors`` (0, 1, 2).

    Uses Dijkstra's three-way partition ("Dutch national flag" algorithm):
    a single O(n) pass with O(1) extra space. Mutates ``sequence`` in place
    (except for the trivial empty / single-element cases) and returns it.

    Raises:
        ValueError: if an element is not one of the values in ``colors``.
    """
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            # Swap the red element down into the low region.
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            # Swap the blue element up into the high region. Do not advance
            # mid: the element swapped in from `high` is still unexamined.
            sequence[high], sequence[mid] = sequence[mid], sequence[high]
            high -= 1
        else:
            msg = f"""The elements inside the sequence must contains only {colors} values"""
            raise ValueError(msg)
    return sequence


# Backward-compatible alias for the previous public name.
UpperCAmelCase__ = dutch_national_flag_sort


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
717
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class _lowercase(PretrainedConfig):
    """Configuration class for an RWKV model.

    Stores the hyper-parameters needed to build the model; defaults match the
    RWKV-4 "pile" checkpoints.
    """

    # `model_type` / `attribute_map` are consumed by the PretrainedConfig machinery.
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Derived defaults: attention width falls back to the hidden size,
        # feed-forward width to 4x the hidden size.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
32
0
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class PixaStructTextConfig(PretrainedConfig):
    """Configuration for the Pix2Struct text decoder."""

    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this text sub-config, unwrapping it from a full Pix2Struct config if needed."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class PixaStructVisionConfig(PretrainedConfig):
    """Configuration for the Pix2Struct vision encoder."""

    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this vision sub-config, unwrapping it from a full Pix2Struct config if needed."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class PixaStructConfig(PretrainedConfig):
    """Composite configuration combining a Pix2Struct text and vision config."""

    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = PixaStructTextConfig(**text_config)
        self.vision_config = PixaStructVisionConfig(**vision_config)

        # Surface the special-token ids of the decoder at the composite level.
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        # Keep both sub-configs initialized with the same range.
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Build a composite config from already-constructed sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding both sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
718
from math import ceil
from typing import List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class _lowercase(SequenceFeatureExtractor):
    """Feature extractor turning raw mono audio into padded log-mel spectrogram
    patches (`audio_values`) plus a patch-level attention mask (`audio_mask`)."""

    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )

        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        # Number of patches along the frequency axis.
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        """Compute a log-mel spectrogram, rescaled into roughly [-1, 1]."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one or more raw mono waveforms into a padded batch."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length]
            for waveform in raw_speech
        ]
        if isinstance(audio_features[0], List):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
32
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tensorflow_text_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Objects importable without any optional backend.
_import_structure = {
    "configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
    "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}

# Each optional backend contributes its objects only when installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bert"] = [
        "BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BertForMaskedLM",
        "BertForMultipleChoice",
        "BertForNextSentencePrediction",
        "BertForPreTraining",
        "BertForQuestionAnswering",
        "BertForSequenceClassification",
        "BertForTokenClassification",
        "BertLayer",
        "BertLMHeadModel",
        "BertModel",
        "BertPreTrainedModel",
        "load_tf_weights_in_bert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_bert"] = [
        "TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBertEmbeddings",
        "TFBertForMaskedLM",
        "TFBertForMultipleChoice",
        "TFBertForNextSentencePrediction",
        "TFBertForPreTraining",
        "TFBertForQuestionAnswering",
        "TFBertForSequenceClassification",
        "TFBertForTokenClassification",
        "TFBertLMHeadModel",
        "TFBertMainLayer",
        "TFBertModel",
        "TFBertPreTrainedModel",
    ]

try:
    if not is_tensorflow_text_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_bert"] = [
        "FlaxBertForCausalLM",
        "FlaxBertForMaskedLM",
        "FlaxBertForMultipleChoice",
        "FlaxBertForNextSentencePrediction",
        "FlaxBertForPreTraining",
        "FlaxBertForQuestionAnswering",
        "FlaxBertForSequenceClassification",
        "FlaxBertForTokenClassification",
        "FlaxBertModel",
        "FlaxBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers; mirrors _import_structure above.
    from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
    from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bert_fast import BertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bert import (
            BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BertForMaskedLM,
            BertForMultipleChoice,
            BertForNextSentencePrediction,
            BertForPreTraining,
            BertForQuestionAnswering,
            BertForSequenceClassification,
            BertForTokenClassification,
            BertLayer,
            BertLMHeadModel,
            BertModel,
            BertPreTrainedModel,
            load_tf_weights_in_bert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_bert import (
            TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBertEmbeddings,
            TFBertForMaskedLM,
            TFBertForMultipleChoice,
            TFBertForNextSentencePrediction,
            TFBertForPreTraining,
            TFBertForQuestionAnswering,
            TFBertForSequenceClassification,
            TFBertForTokenClassification,
            TFBertLMHeadModel,
            TFBertMainLayer,
            TFBertModel,
            TFBertPreTrainedModel,
        )

    try:
        if not is_tensorflow_text_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bert_tf import TFBertTokenizer

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_bert import (
            FlaxBertForCausalLM,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForNextSentencePrediction,
            FlaxBertForPreTraining,
            FlaxBertForQuestionAnswering,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertModel,
            FlaxBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
719
# Tests for the MaskFormer model: config checks, forward passes of the base
# model and the instance-segmentation head, backprop/retain-grad checks, and
# slow integration tests against pretrained checkpoints.
# NOTE(review): identifiers in this file have been machine-mangled (`a`,
# `_lowercase`, `A`, `__lowerCAmelCase`). Several defs repeat the same
# parameter name, which is a SyntaxError, and method bodies reference the
# original (now undefined) names — this module cannot run as-is.
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel

if is_vision_available():
    from transformers import MaskFormerImageProcessor

if is_vision_available():
    from PIL import Image


class _lowercase :
    # Helper that builds tiny random configs/inputs shared by the tests below.
    # NOTE(review): duplicated parameter names below are a SyntaxError; the
    # body shows the intended names (parent, batch_size, is_training,
    # use_auxiliary_loss, num_queries, num_channels, min_size, max_size,
    # num_labels, mask_feature_size).
    def __init__( self : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : int=10 , __lowerCAmelCase : Any=3 , __lowerCAmelCase : Optional[int]=32 * 4 , __lowerCAmelCase : Dict=32 * 6 , __lowerCAmelCase : str=4 , __lowerCAmelCase : Dict=32 , ) -> Any:
        """Store sizing knobs for the dummy MaskFormer configuration."""
        a = parent
        a = batch_size
        a = is_training
        a = use_auxiliary_loss
        a = num_queries
        a = num_channels
        a = min_size
        a = max_size
        a = num_labels
        a = mask_feature_size

    def A ( self : Union[str, Any] ) -> Dict:
        """Build random pixel values, masks and labels plus a config."""
        a = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            __lowerCAmelCase )
        a = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowerCAmelCase )
        # binary mask labels sampled at 0.5 probability per pixel
        a = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowerCAmelCase ) > 0.5
        ).float()
        a = (torch.rand((self.batch_size, self.num_labels) , device=__lowerCAmelCase ) > 0.5).long()
        a = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def A ( self : str ) -> Any:
        """Tiny Swin backbone + tiny DETR decoder MaskFormer config."""
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
                decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )

    def A ( self : Union[str, Any] ) -> Any:
        """Return (config, inputs_dict) for the common model tests."""
        a , a , a , a , a = self.prepare_config_and_inputs()
        a = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def A ( self : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ) -> str:
        """Check the three hidden-state tuples have the expected lengths."""
        a = output.encoder_hidden_states
        a = output.pixel_decoder_hidden_states
        a = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__lowerCAmelCase ) , config.decoder_config.decoder_layers )

    def A ( self : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str]=False ) -> Tuple:
        """Forward the base model and validate output shapes."""
        with torch.no_grad():
            a = MaskFormerModel(config=__lowerCAmelCase )
            model.to(__lowerCAmelCase )
            model.eval()
            a = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase )
            a = model(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )

        if output_hidden_states:
            self.check_output_hidden_state(__lowerCAmelCase , __lowerCAmelCase )

    def A ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] ) -> Optional[int]:
        """Forward the segmentation head with and without labels; check logits and loss."""
        a = MaskFormerForInstanceSegmentation(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()

        def comm_check_on_output(__lowerCAmelCase : Tuple ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            a = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase )
            a = model(__lowerCAmelCase )

            comm_check_on_output(__lowerCAmelCase )

            a = model(
                pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase )

            comm_check_on_output(__lowerCAmelCase )

            self.parent.assertTrue(result.loss is not None )
            self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )


@require_torch
# NOTE(review): the duplicated `UpperCAmelCase__` base (undefined, listed
# twice) would be a TypeError at class creation; originals were likely
# ModelTesterMixin and PipelineTesterMixin.
class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
    # NOTE(review): all class attributes were mangled to `_UpperCAmelCase`,
    # so only the last assignment survives; originals were all_model_classes,
    # pipeline_model_mapping, and several boolean feature flags.
    _UpperCAmelCase = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    _UpperCAmelCase = (
        {'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    _UpperCAmelCase = False

    def A ( self : List[str] ) -> List[Any]:
        """setUp: build the model tester and the config tester."""
        a = MaskFormerModelTester(self )
        a = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase )

    def A ( self : Any ) -> List[str]:
        """Run the shared config sanity tests."""
        self.config_tester.run_common_tests()

    def A ( self : Optional[Any] ) -> Optional[int]:
        """Forward-pass test of the base model."""
        a , a = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )

    def A ( self : int ) -> int:
        """Forward-pass test of the instance-segmentation head."""
        a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__lowerCAmelCase )

    @unittest.skip(reason="MaskFormer does not use inputs_embeds" )
    def A ( self : List[Any] ) -> Optional[Any]:
        """Skipped common test."""
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
    def A ( self : str ) -> Union[str, Any]:
        """Skipped common test."""
        pass

    @unittest.skip(reason="MaskFormer is not a generative model" )
    def A ( self : Tuple ) -> Optional[Any]:
        """Skipped common test."""
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings" )
    def A ( self : Tuple ) -> Optional[Any]:
        """Skipped common test."""
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
    def A ( self : Optional[int] ) -> List[str]:
        """Skipped common test."""
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def A ( self : List[str] ) -> Any:
        """Skipped common test."""
        pass

    def A ( self : Optional[Any] ) -> Optional[Any]:
        """forward() must take pixel_values as its first argument."""
        a , a = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            a = model_class(__lowerCAmelCase )
            a = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            a = [*signature.parameters.keys()]

            a = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , __lowerCAmelCase )

    @slow
    def A ( self : Tuple ) -> List[Any]:
        """Pretrained checkpoints must load."""
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            a = MaskFormerModel.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )

    def A ( self : str ) -> Dict:
        """Loss must be produced with a default config and random labels."""
        a = (self.model_tester.min_size,) * 2
        a = {
            "pixel_values": torch.randn((2, 3, *size) , device=__lowerCAmelCase ),
            "mask_labels": torch.randn((2, 10, *size) , device=__lowerCAmelCase ),
            "class_labels": torch.zeros(2 , 10 , device=__lowerCAmelCase ).long(),
        }

        a = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__lowerCAmelCase )
        a = model(**__lowerCAmelCase )
        self.assertTrue(outputs.loss is not None )

    def A ( self : Union[str, Any] ) -> List[Any]:
        """Forward pass with output_hidden_states enabled."""
        a , a = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )

    def A ( self : List[str] ) -> Any:
        """Attentions must be returned when output_attentions is set."""
        a , a = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            a = model_class(__lowerCAmelCase ).to(__lowerCAmelCase )
            a = model(**__lowerCAmelCase , output_attentions=__lowerCAmelCase )
            self.assertTrue(outputs.attentions is not None )

    def A ( self : Optional[Any] ) -> Union[str, Any]:
        """Training smoke test: loss.backward() must not raise."""
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        a = self.all_model_classes[1]
        a , a , a , a , a = self.model_tester.prepare_config_and_inputs()

        a = model_class(__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.train()

        a = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ).loss
        loss.backward()

    def A ( self : List[str] ) -> Union[str, Any]:
        """Gradients must flow to all intermediate hidden states and attentions."""
        a = self.all_model_classes[1]
        a , a , a , a , a = self.model_tester.prepare_config_and_inputs()

        a = True
        a = True

        a = model_class(__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.train()

        a = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase )

        a = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        a = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        a = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        a = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=__lowerCAmelCase )

        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )


# absolute tolerance for the integration-test tensor comparisons
A_ : int = 1E-4


def UpperCAmelCase__ ( ):
    """Load the standard COCO cats test fixture image."""
    a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image


@require_vision
@slow
class _lowercase ( unittest.TestCase ):
    # Slow integration tests against pretrained MaskFormer checkpoints.
    @cached_property
    def A ( self : int ) -> Optional[int]:
        """Image processor for the swin-small COCO checkpoint (None without vision deps)."""
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
            if is_vision_available()
            else None
        )

    def A ( self : List[Any] ) -> Optional[Any]:
        """Base-model inference: compare hidden-state slices to golden values."""
        a = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(__lowerCAmelCase )
        a = self.default_image_processor
        a = prepare_img()
        a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase )
        a = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) )

        with torch.no_grad():
            a = model(**__lowerCAmelCase )

        a = torch.tensor(
            [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(__lowerCAmelCase )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )

        a = torch.tensor(
            [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(__lowerCAmelCase )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )

        a = torch.tensor(
            [[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(__lowerCAmelCase )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )

    def A ( self : str ) -> Union[str, Any]:
        """Segmentation-head inference (swin-small): check logits against golden values."""
        a = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
            .to(__lowerCAmelCase )
            .eval() )
        a = self.default_image_processor
        a = prepare_img()
        a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase )
        a = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) )

        with torch.no_grad():
            a = model(**__lowerCAmelCase )

        # masks_queries_logits
        a = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        a = [
            [-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
            [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
            [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
        ]
        a = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )

        # class_queries_logits
        a = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        a = torch.tensor(
            [
                [1.65_12E00, -5.25_72E00, -3.35_19E00],
                [3.61_69E-02, -5.90_25E00, -2.93_13E00],
                [1.07_66E-04, -7.76_30E00, -5.12_63E00],
            ] ).to(__lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )

    def A ( self : List[Any] ) -> Any:
        """Segmentation-head inference (resnet101 coco-stuff): check logits."""
        a = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
            .to(__lowerCAmelCase )
            .eval() )
        a = self.default_image_processor
        a = prepare_img()
        a = image_processor(__lowerCAmelCase , return_tensors="pt" ).to(__lowerCAmelCase )
        a = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(__lowerCAmelCase , (1, 3, 800, 1088) )

        with torch.no_grad():
            a = model(**__lowerCAmelCase )

        # masks_queries_logits
        a = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        a = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
        a = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )

        # class_queries_logits
        a = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        a = torch.tensor(
            [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(__lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )

    def A ( self : int ) -> Any:
        """Training-style forward with segmentation maps must yield a loss."""
        a = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
            .to(__lowerCAmelCase )
            .eval() )
        a = self.default_image_processor
        a = image_processor(
            [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , )

        a = inputs["pixel_values"].to(__lowerCAmelCase )
        a = [el.to(__lowerCAmelCase ) for el in inputs["mask_labels"]]
        a = [el.to(__lowerCAmelCase ) for el in inputs["class_labels"]]

        with torch.no_grad():
            a = model(**__lowerCAmelCase )

        self.assertTrue(outputs.loss is not None )
32
0
# Tests for the ImageGPT image processor: attribute/serialization round-trips
# and a slow integration test that checks pixel clustering into input_ids.
# NOTE(review): identifiers here are machine-mangled (`a`, `A`, `snake_case_`,
# `__lowerCAmelCase`); duplicated parameter names are a SyntaxError and many
# bodies reference the original (now undefined) names — not runnable as-is.
import json
import os
import tempfile
import unittest

import numpy as np
from datasets import load_dataset

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ImageGPTImageProcessor


class _lowercase ( unittest.TestCase ):
    # Helper that supplies a minimal image-processor kwargs dict.
    # NOTE(review): intended parameter names (from the body): parent,
    # batch_size, num_channels, image_size, min_resolution, max_resolution,
    # do_resize, size, do_normalize.
    def __init__( self : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any=7 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : List[Any]=18 , __lowerCAmelCase : str=30 , __lowerCAmelCase : Dict=400 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Any=True , ) -> int:
        """Store sizing/normalization knobs for the processor tests."""
        a = size if size is not None else {"height": 18, "width": 18}
        a = parent
        a = batch_size
        a = num_channels
        a = image_size
        a = min_resolution
        a = max_resolution
        a = do_resize
        a = size
        a = do_normalize

    def A ( self : Any ) -> int:
        """Return the kwargs used to instantiate ImageGPTImageProcessor."""
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4],
                    [-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6],
                ] ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }


@require_torch
@require_vision
class _lowercase ( _snake_case, unittest.TestCase ):
    # NOTE(review): base `_snake_case` is undefined here; presumably the
    # imported ImageProcessingSavingTestMixin — confirm against upstream.
    _UpperCAmelCase = ImageGPTImageProcessor if is_vision_available() else None

    def A ( self : Tuple ) -> List[str]:
        """setUp: build the processing tester."""
        a = ImageGPTImageProcessingTester(self )

    @property
    def A ( self : Tuple ) -> Any:
        """Kwargs dict for constructing the processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def A ( self : Optional[Any] ) -> str:
        """The processor must expose its configuration attributes."""
        a = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(snake_case_ , "clusters" ) )
        self.assertTrue(hasattr(snake_case_ , "do_resize" ) )
        self.assertTrue(hasattr(snake_case_ , "size" ) )
        self.assertTrue(hasattr(snake_case_ , "do_normalize" ) )

    def A ( self : Optional[int] ) -> int:
        """from_dict must honor the dict and keyword overrides for `size`."""
        a = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 18, "width": 18} )

        a = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"height": 42, "width": 42} )

    def A ( self : Any ) -> Dict:
        """JSON round-trip must preserve every field (clusters via array compare)."""
        a = self.image_processing_class(**self.image_processor_dict )
        a = json.loads(image_processor.to_json_string() )
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(snake_case_ , obj[key] ) )
            else:
                self.assertEqual(obj[key] , snake_case_ )

    def A ( self : Tuple ) -> Optional[Any]:
        """to_json_file/from_json_file round-trip must preserve the config."""
        a = self.image_processing_class(**self.image_processor_dict )

        with tempfile.TemporaryDirectory() as tmpdirname:
            a = os.path.join(snake_case_ , "image_processor.json" )
            image_processor_first.to_json_file(snake_case_ )
            a = self.image_processing_class.from_json_file(snake_case_ ).to_dict()

        a = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(snake_case_ , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] , snake_case_ )

    def A ( self : int ) -> Optional[Any]:
        """save_pretrained/from_pretrained round-trip must preserve the config."""
        a = self.image_processing_class(**self.image_processor_dict )

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(snake_case_ )
            a = self.image_processing_class.from_pretrained(snake_case_ ).to_dict()

        a = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(snake_case_ , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] , snake_case_ )

    @unittest.skip("ImageGPT requires clusters at initialization" )
    def A ( self : Tuple ) -> Dict:
        """Skipped common init-without-params test."""
        pass


def UpperCAmelCase__ ( ):
    """Load two fixture images from the HF test dataset."""
    a = load_dataset("hf-internal-testing/fixtures_image_utils" , split="test" )

    a = Image.open(dataset[4]["file"] )
    a = Image.open(dataset[5]["file"] )

    a = [imagea, imagea]

    return images


@require_vision
@require_torch
class _lowercase ( unittest.TestCase ):
    @slow
    def A ( self : str ) -> Tuple:
        """Integration: cluster pixels of fixture images into known input_ids."""
        a = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small" )
        a = prepare_images()

        # test non-batched
        a = image_processing(images[0] , return_tensors="pt" )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (1, 1024) )

        a = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist() , snake_case_ )

        # test batched
        a = image_processing(snake_case_ , return_tensors="pt" )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (2, 1024) )

        a = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist() , snake_case_ )
720
# Tests for transformers.generation.DisjunctiveConstraint.
#
# Fix: the tuple unpacks had been mangled to `a , a , a = dc.update(...)`
# while the assertions below read `stepped`, `completed`, `reset`, `desired`
# and `dc` — every test method raised NameError. The evident variable names
# are restored; runtime behavior of the assertions is otherwise unchanged.
# NOTE(review): all test methods share the name `A`, so only the last
# definition survives on the class; kept as-is to avoid an interface change,
# but the originals were almost certainly distinct `test_*` names.
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class _lowercase ( unittest.TestCase ):
    def A ( self : Union[str, Any] ) -> int:
        """The constraint accepts a nested list of ints and rejects tensors."""
        token_ids = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(token_ids)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def A ( self : Tuple ) -> Dict:
        """A branch that is a strict prefix of another branch is illegal."""
        token_ids = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(token_ids)  # fails here

    def A ( self : int ) -> Any:
        """Stepping 1 -> 2 -> 3 completes the [1, 2, 3] branch."""
        token_ids = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(token_ids)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def A ( self : List[Any] ) -> List[Any]:
        """Walk the long branch to completion, then reset and take the short one."""
        token_ids = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(token_ids)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
32
0
# Tests for the LDMTextToImagePipeline: a fast CPU smoke test with tiny
# components, plus slow/nightly GPU integration tests against the
# CompVis/ldm-text2im-large-256 checkpoint.
# NOTE(review): identifiers are machine-mangled (`a`, `A`, `UpperCAmelCase__`,
# `__lowerCAmelCase`); bodies reference the original (now undefined) names,
# so this module is not runnable as-is.
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    load_numpy,
    nightly,
    require_torch_gpu,
    slow,
    torch_device,
)

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


# NOTE(review): base `lowercase__` is undefined; presumably PipelineTesterMixin.
class _lowercase ( lowercase__, unittest.TestCase ):
    # NOTE(review): all attrs mangled to `_UpperCAmelCase`, so only the last
    # assignment survives; originals were pipeline_class, params,
    # required_optional_params, batch_params, and a boolean flag.
    _UpperCAmelCase = LDMTextToImagePipeline
    _UpperCAmelCase = TEXT_TO_IMAGE_PARAMS - {
        '''negative_prompt''',
        '''negative_prompt_embeds''',
        '''cross_attention_kwargs''',
        '''prompt_embeds''',
    }
    _UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
        '''num_images_per_prompt''',
        '''callback''',
        '''callback_steps''',
    }
    _UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
    _UpperCAmelCase = False

    def A ( self : Optional[Any] ) -> Tuple:
        """Build tiny seeded UNet/scheduler/VAE/CLIP components for fast tests."""
        torch.manual_seed(0 )
        a = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        a = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=UpperCAmelCase__ , set_alpha_to_one=UpperCAmelCase__ , )
        torch.manual_seed(0 )
        a = AutoencoderKL(
            block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , latent_channels=4 , )
        torch.manual_seed(0 )
        a = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        a = CLIPTextModel(UpperCAmelCase__ )
        a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )

        a = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vqvae''': vae,
            '''bert''': text_encoder,
            '''tokenizer''': tokenizer,
        }
        return components

    def A ( self : Optional[int] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any]=0 ) -> List[Any]:
        """Build seeded pipeline kwargs; MPS needs a global seed, not a Generator."""
        if str(UpperCAmelCase__ ).startswith("mps" ):
            a = torch.manual_seed(UpperCAmelCase__ )
        else:
            a = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
        a = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs

    def A ( self : str ) -> Optional[Any]:
        """Fast CPU smoke test: compare a 3x3 output slice to golden values."""
        a = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        a = self.get_dummy_components()
        a = LDMTextToImagePipeline(**UpperCAmelCase__ )
        pipe.to(UpperCAmelCase__ )
        pipe.set_progress_bar_config(disable=UpperCAmelCase__ )

        a = self.get_dummy_inputs(UpperCAmelCase__ )
        a = pipe(**UpperCAmelCase__ ).images
        a = image[0, -3:, -3:, -1]

        assert image.shape == (1, 16, 16, 3)
        a = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3


@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
    def A ( self : Any ) -> int:
        """tearDown: free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def A ( self : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict=torch.floataa , __lowerCAmelCase : str=0 ) -> Any:
        """Seeded inputs with fixed latents for reproducible GPU inference."""
        a = torch.manual_seed(UpperCAmelCase__ )
        a = np.random.RandomState(UpperCAmelCase__ ).standard_normal((1, 4, 32, 32) )
        a = torch.from_numpy(UpperCAmelCase__ ).to(device=UpperCAmelCase__ , dtype=UpperCAmelCase__ )
        a = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''latents''': latents,
            '''generator''': generator,
            '''num_inference_steps''': 3,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs

    def A ( self : Union[str, Any] ) -> Any:
        """Slow GPU test: 3-step inference against golden slice values."""
        a = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(UpperCAmelCase__ )
        pipe.set_progress_bar_config(disable=UpperCAmelCase__ )

        a = self.get_inputs(UpperCAmelCase__ )
        a = pipe(**UpperCAmelCase__ ).images
        a = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 256, 256, 3)
        a = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
        a = np.abs(expected_slice - image_slice ).max()
        assert max_diff < 1E-3


@nightly
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
    def A ( self : int ) -> Optional[int]:
        """tearDown: free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def A ( self : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int]=torch.floataa , __lowerCAmelCase : Any=0 ) -> int:
        """Seeded inputs with fixed latents; 50 inference steps for nightly runs."""
        a = torch.manual_seed(UpperCAmelCase__ )
        a = np.random.RandomState(UpperCAmelCase__ ).standard_normal((1, 4, 32, 32) )
        a = torch.from_numpy(UpperCAmelCase__ ).to(device=UpperCAmelCase__ , dtype=UpperCAmelCase__ )
        a = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''latents''': latents,
            '''generator''': generator,
            '''num_inference_steps''': 50,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs

    def A ( self : Any ) -> Union[str, Any]:
        """Nightly GPU test: full-resolution output against a stored array."""
        a = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(UpperCAmelCase__ )
        pipe.set_progress_bar_config(disable=UpperCAmelCase__ )

        a = self.get_inputs(UpperCAmelCase__ )
        a = pipe(**UpperCAmelCase__ ).images[0]

        a = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy" )
        a = np.abs(expected_image - image ).max()
        assert max_diff < 1E-3
721
from __future__ import annotations def UpperCAmelCase__ ( UpperCAmelCase__ :int ): '''simple docstring''' a = str(UpperCAmelCase__ ) return len(UpperCAmelCase__ ) == 9 and set(UpperCAmelCase__ ) == set("123456789" ) def UpperCAmelCase__ ( ): '''simple docstring''' for base_num in range(99_99 , 49_99 , -1 ): a = 10_00_02 * base_num if is_9_pandigital(UpperCAmelCase__ ): return candidate for base_num in range(3_33 , 99 , -1 ): a = 1_00_20_03 * base_num if is_9_pandigital(UpperCAmelCase__ ): return candidate return None if __name__ == "__main__": print(F"""{solution() = }""")
32
0
from __future__ import annotations def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[Any] , UpperCAmelCase__ :List[Any] ): '''simple docstring''' a = sorted(numsa + numsa ) a , a = divmod(len(_lowerCAmelCase ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() A_ : Optional[int] = [float(x) for x in input('''Enter the elements of first array: ''').split()] A_ : List[Any] = [float(x) for x in input('''Enter the elements of second array: ''').split()] print(F"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
700
import os import tempfile import unittest from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter from transformers.testing_utils import slow from transformers.utils import cached_property @unittest.skipUnless(os.path.exists(UpperCAmelCase__ ), '''Tatoeba directory does not exist.''' ) class _lowercase ( unittest.TestCase ): @cached_property def A ( self : List[str] ) -> int: """simple docstring""" a = tempfile.mkdtemp() return TatoebaConverter(save_dir=__lowerCAmelCase ) @slow def A ( self : Optional[int] ) -> List[str]: """simple docstring""" self.resolver.convert_models(["heb-eng"] ) @slow def A ( self : Dict ) -> Any: """simple docstring""" a , a = self.resolver.write_model_card("opus-mt-he-en" , dry_run=__lowerCAmelCase ) assert mmeta["long_pair"] == "heb-eng"
32
0
from torch import nn class _lowercase ( nn.Module ): def __init__( self : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict ) -> Dict: """simple docstring""" super().__init__() a = class_size a = embed_size # self.mlp1 = nn.Linear(embed_size, embed_size) # self.mlp2 = (nn.Linear(embed_size, class_size)) a = nn.Linear(__lowerCamelCase , __lowerCamelCase ) def A ( self : Optional[Any] , __lowerCAmelCase : Union[str, Any] ) -> Any: """simple docstring""" a = self.mlp(__lowerCamelCase ) return logits
701
from ...configuration_utils import PretrainedConfig from ...utils import logging A_ : Any = logging.get_logger(__name__) A_ : Optional[int] = { '''SCUT-DLVCLab/lilt-roberta-en-base''': ( '''https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json''' ), } class _lowercase ( UpperCAmelCase__ ): _UpperCAmelCase = '''lilt''' def __init__( self : Union[str, Any] , __lowerCAmelCase : Optional[Any]=3_0522 , __lowerCAmelCase : str=768 , __lowerCAmelCase : Any=12 , __lowerCAmelCase : Optional[Any]=12 , __lowerCAmelCase : List[Any]=3072 , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : int=0.0_2 , __lowerCAmelCase : Union[str, Any]=1E-12 , __lowerCAmelCase : Tuple=0 , __lowerCAmelCase : List[Any]="absolute" , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : Dict=1024 , **__lowerCAmelCase : Dict , ) -> int: """simple docstring""" super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase ) a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = hidden_act a = intermediate_size a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = initializer_range a = layer_norm_eps a = position_embedding_type a = classifier_dropout a = channel_shrink_ratio a = max_ad_position_embeddings
32
0
def UpperCAmelCase__ ( UpperCAmelCase__ :Tuple ): '''simple docstring''' assert ( isinstance(UpperCamelCase__ , UpperCamelCase__ ) and number_of_steps > 0 ), F"""number_of_steps needs to be positive integer, your input {number_of_steps}""" if number_of_steps == 1: return 1 a , a = 1, 1 for _ in range(number_of_steps - 1 ): a , a = current + previous, current return current if __name__ == "__main__": import doctest doctest.testmod()
702
import argparse from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta from transformers.utils import logging logging.set_verbosity_info() def UpperCAmelCase__ ( UpperCAmelCase__ :Optional[int] , UpperCAmelCase__ :List[str] , UpperCAmelCase__ :Any ): '''simple docstring''' a = TaConfig.from_json_file(UpperCAmelCase__ ) print(F"""Building PyTorch model from configuration: {config}""" ) a = TaForConditionalGeneration(UpperCAmelCase__ ) # Load weights from tf checkpoint load_tf_weights_in_ta(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # Save pytorch-model print(F"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(UpperCAmelCase__ ) if __name__ == "__main__": A_ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) A_ : Tuple = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
32
0
import os import zipfile import requests from get_ci_error_statistics import download_artifact, get_artifacts_links def UpperCAmelCase__ ( UpperCAmelCase__ :Dict , UpperCAmelCase__ :Dict=7 ): '''simple docstring''' a = None if token is not None: a = {"Accept": "application/vnd.github+json", "Authorization": F"""Bearer {token}"""} # The id of a workflow (not of a workflow run) a = "636036" a = F"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs""" # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results url += F"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}""" a = requests.get(__SCREAMING_SNAKE_CASE , headers=__SCREAMING_SNAKE_CASE ).json() return result["workflow_runs"] def UpperCAmelCase__ ( UpperCAmelCase__ :Any ): '''simple docstring''' a = get_daily_ci_runs(__SCREAMING_SNAKE_CASE ) a = None for workflow_run in workflow_runs: if workflow_run["status"] == "completed": a = workflow_run["id"] break return workflow_run_id def UpperCAmelCase__ ( UpperCAmelCase__ :List[str] , UpperCAmelCase__ :Dict , UpperCAmelCase__ :List[Any] ): '''simple docstring''' a = get_last_daily_ci_runs(__SCREAMING_SNAKE_CASE ) if workflow_run_id is not None: a = get_artifacts_links(worflow_run_id=__SCREAMING_SNAKE_CASE , token=__SCREAMING_SNAKE_CASE ) for artifact_name in artifact_names: if artifact_name in artifacts_links: a = artifacts_links[artifact_name] download_artifact( artifact_name=__SCREAMING_SNAKE_CASE , artifact_url=__SCREAMING_SNAKE_CASE , output_dir=__SCREAMING_SNAKE_CASE , token=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( UpperCAmelCase__ :Dict , UpperCAmelCase__ :Optional[Any] , UpperCAmelCase__ :Any ): '''simple docstring''' get_last_daily_ci_artifacts(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) a = {} for artifact_name in artifact_names: a = os.path.join(__SCREAMING_SNAKE_CASE , F"""{artifact_name}.zip""" ) if 
os.path.isfile(__SCREAMING_SNAKE_CASE ): a = {} with zipfile.ZipFile(__SCREAMING_SNAKE_CASE ) as z: for filename in z.namelist(): if not os.path.isdir(__SCREAMING_SNAKE_CASE ): # read the file with z.open(__SCREAMING_SNAKE_CASE ) as f: a = f.read().decode("UTF-8" ) return results
703
def UpperCAmelCase__ ( UpperCAmelCase__ :int , UpperCAmelCase__ :int ): '''simple docstring''' if a < 0 or b < 0: raise ValueError("the value of both inputs must be positive" ) a = str(bin(UpperCAmelCase__ ) )[2:] # remove the leading "0b" a = str(bin(UpperCAmelCase__ ) )[2:] # remove the leading "0b" a = max(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) ) return "0b" + "".join( str(int(char_a == "1" and char_b == "1" ) ) for char_a, char_b in zip(a_binary.zfill(UpperCAmelCase__ ) , b_binary.zfill(UpperCAmelCase__ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
32
0
import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class _lowercase ( _UpperCAmelCase, unittest.TestCase ): _UpperCAmelCase = DebertaTokenizer _UpperCAmelCase = True _UpperCAmelCase = DebertaTokenizerFast def A ( self : str ) -> Tuple: """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt a = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "[UNK]", ] a = dict(zip(A_ , range(len(A_ ) ) ) ) a = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] a = {"unk_token": "[UNK]"} a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(A_ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(A_ ) ) def A ( self : Any , **__lowerCAmelCase : Union[str, Any] ) -> List[Any]: """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **A_ ) def A ( self : Tuple , __lowerCAmelCase : Tuple ) -> Dict: """simple docstring""" a = "lower newer" a = "lower newer" return input_text, output_text def A ( self : Any ) -> List[str]: """simple docstring""" a = self.get_tokenizer() a = "lower newer" a = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"] a = tokenizer.tokenize(A_ ) self.assertListEqual(A_ , A_ ) a = tokens + [tokenizer.unk_token] a = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , A_ ) def A ( self : str ) -> Union[str, Any]: """simple docstring""" 
a = self.get_tokenizer() a = tokenizer("Hello" , "World" ) a = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd["token_type_ids"] , A_ ) @slow def A ( self : Tuple ) -> List[Any]: """simple docstring""" a = self.tokenizer_class.from_pretrained("microsoft/deberta-base" ) a = tokenizer.encode("sequence builders" , add_special_tokens=A_ ) a = tokenizer.encode("multi-sequence build" , add_special_tokens=A_ ) a = tokenizer.encode( "sequence builders" , add_special_tokens=A_ , add_prefix_space=A_ ) a = tokenizer.encode( "sequence builders" , "multi-sequence build" , add_special_tokens=A_ , add_prefix_space=A_ ) a = tokenizer.build_inputs_with_special_tokens(A_ ) a = tokenizer.build_inputs_with_special_tokens(A_ , A_ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def A ( self : List[str] ) -> Dict: """simple docstring""" a = [self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: a = tokenizer_class.from_pretrained("microsoft/deberta-base" ) a = [ "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations", "ALBERT incorporates two parameter reduction techniques", "The first one is a factorized embedding parameterization. 
By decomposing the large vocabulary" " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of" " vocabulary embedding.", ] a = tokenizer(A_ , padding=A_ ) a = [tokenizer.decode(A_ , skip_special_tokens=A_ ) for seq in encoding["input_ids"]] # fmt: off a = { "input_ids": [ [1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2] ], "token_type_ids": [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], "attention_mask": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on a = [ "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations", "ALBERT incorporates two parameter reduction techniques", "The first one is a factorized embedding 
parameterization. By decomposing the large vocabulary" " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of" " vocabulary embedding.", ] self.assertDictEqual(encoding.data , A_ ) for expected, decoded in zip(A_ , A_ ): self.assertEqual(A_ , A_ )
704
from __future__ import annotations from collections.abc import Iterable, Iterator from dataclasses import dataclass A_ : List[str] = (3, 9, -11, 0, 7, 5, 1, -1) A_ : Optional[int] = (4, 6, 2, 0, 8, 10, 3, -2) @dataclass class _lowercase : _UpperCAmelCase = 42 _UpperCAmelCase = 42 class _lowercase : def __init__( self : List[Any] , __lowerCAmelCase : Iterable[int] ) -> None: """simple docstring""" a = None for i in sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ): a = Node(__lowerCAmelCase , self.head ) def __iter__( self : Union[str, Any] ) -> Iterator[int]: """simple docstring""" a = self.head while node: yield node.data a = node.next_node def __len__( self : Tuple ) -> int: """simple docstring""" return sum(1 for _ in self ) def __str__( self : Union[str, Any] ) -> str: """simple docstring""" return " -> ".join([str(__lowerCAmelCase ) for node in self] ) def UpperCAmelCase__ ( UpperCAmelCase__ :SortedLinkedList , UpperCAmelCase__ :SortedLinkedList ): '''simple docstring''' return SortedLinkedList(list(UpperCAmelCase__ ) + list(UpperCAmelCase__ ) ) if __name__ == "__main__": import doctest doctest.testmod() A_ : Optional[Any] = SortedLinkedList print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
32
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available A_ : Optional[int] = { 'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'], 'tokenization_tapas': ['TapasTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : Optional[int] = [ 'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST', 'TapasForMaskedLM', 'TapasForQuestionAnswering', 'TapasForSequenceClassification', 'TapasModel', 'TapasPreTrainedModel', 'load_tf_weights_in_tapas', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : List[str] = [ 'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFTapasForMaskedLM', 'TFTapasForQuestionAnswering', 'TFTapasForSequenceClassification', 'TFTapasModel', 'TFTapasPreTrainedModel', ] if TYPE_CHECKING: from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig from .tokenization_tapas import TapasTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tapas import ( TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasPreTrainedModel, load_tf_weights_in_tapas, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_tapas import ( TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TFTapasForMaskedLM, TFTapasForQuestionAnswering, TFTapasForSequenceClassification, TFTapasModel, TFTapasPreTrainedModel, ) else: import sys A_ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
705
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
32
0
import torch import torch.nn as nn from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ): @register_to_config def __init__( self : Any , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : float , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : str , __lowerCAmelCase : bool = False , ) -> Union[str, Any]: """simple docstring""" super().__init__() a = nn.Embedding(__UpperCamelCase , __UpperCamelCase ) a = nn.Embedding(__UpperCamelCase , __UpperCamelCase ) a = False a = nn.Dropout(p=__UpperCamelCase ) a = TaConfig( vocab_size=__UpperCamelCase , d_model=__UpperCamelCase , num_heads=__UpperCamelCase , d_kv=__UpperCamelCase , d_ff=__UpperCamelCase , dropout_rate=__UpperCamelCase , feed_forward_proj=__UpperCamelCase , is_decoder=__UpperCamelCase , is_encoder_decoder=__UpperCamelCase , ) a = nn.ModuleList() for lyr_num in range(__UpperCamelCase ): a = TaBlock(__UpperCamelCase ) self.encoders.append(__UpperCamelCase ) a = TaLayerNorm(__UpperCamelCase ) a = nn.Dropout(p=__UpperCamelCase ) def A ( self : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any] ) -> int: """simple docstring""" a = self.token_embedder(__UpperCamelCase ) a = encoder_input_tokens.shape[1] a = torch.arange(__UpperCamelCase , device=encoder_input_tokens.device ) x += self.position_encoding(__UpperCamelCase ) a = self.dropout_pre(__UpperCamelCase ) # inverted the attention mask a = encoder_input_tokens.size() a = self.get_extended_attention_mask(__UpperCamelCase , __UpperCamelCase ) for lyr in self.encoders: a = lyr(__UpperCamelCase , __UpperCamelCase )[0] a = self.layer_norm(__UpperCamelCase ) return 
self.dropout_post(__UpperCamelCase ), encoder_inputs_mask
706
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices A_ : int = logging.get_logger(__name__) A_ : str = { '''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''', } class _lowercase ( UpperCAmelCase__, UpperCAmelCase__ ): _UpperCAmelCase = '''focalnet''' def __init__( self : int , __lowerCAmelCase : Optional[Any]=224 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : Any=3 , __lowerCAmelCase : Tuple=96 , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Optional[int]=[192, 384, 768, 768] , __lowerCAmelCase : Union[str, Any]=[2, 2, 6, 2] , __lowerCAmelCase : Optional[int]=[2, 2, 2, 2] , __lowerCAmelCase : Union[str, Any]=[3, 3, 3, 3] , __lowerCAmelCase : str="gelu" , __lowerCAmelCase : Any=4.0 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : str=False , __lowerCAmelCase : Optional[int]=1E-4 , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : str=False , __lowerCAmelCase : Any=0.0_2 , __lowerCAmelCase : str=1E-5 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Dict=None , __lowerCAmelCase : str=None , **__lowerCAmelCase : Any , ) -> List[str]: """simple docstring""" super().__init__(**__lowerCAmelCase ) a = image_size a = patch_size a = num_channels a = embed_dim a = use_conv_embed a = hidden_sizes a = depths a = focal_levels a = focal_windows a = hidden_act a = mlp_ratio a = hidden_dropout_prob a = drop_path_rate a = use_layerscale a = layerscale_value a = use_post_layernorm a = use_post_layernorm_in_modulation a = normalize_modulator a = initializer_range a = layer_norm_eps a = encoder_stride a = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )] a , a = get_aligned_output_features_output_indices( out_features=__lowerCAmelCase , out_indices=__lowerCAmelCase , 
stage_names=self.stage_names )
32
0
def UpperCAmelCase__ ( UpperCAmelCase__ :float , UpperCAmelCase__ :float , UpperCAmelCase__ :float , UpperCAmelCase__ :float , UpperCAmelCase__ :float , ) -> float: '''simple docstring''' a = [redshift, radiation_density, matter_density, dark_energy] if any(p < 0 for p in parameters ): raise ValueError("All input parameters must be positive" ) if any(p > 1 for p in parameters[1:4] ): raise ValueError("Relative densities cannot be greater than one" ) else: a = 1 - (matter_density + radiation_density + dark_energy) a = ( radiation_density * (redshift + 1) ** 4 + matter_density * (redshift + 1) ** 3 + curvature * (redshift + 1) ** 2 + dark_energy ) a = hubble_constant * e_a ** (1 / 2) return hubble if __name__ == "__main__": import doctest # run doctest doctest.testmod() # demo LCDM approximation A_ : str = 0.3 print( hubble_parameter( hubble_constant=68.3, radiation_density=1E-4, matter_density=matter_density, dark_energy=1 - matter_density, redshift=0, ) )
707
def UpperCAmelCase__ ( UpperCAmelCase__ :Any ): '''simple docstring''' if not head: return True # split the list to two parts a , a = head.next, head while fast and fast.next: a = fast.next.next a = slow.next a = slow.next a = None # Don't forget here! But forget still works! # reverse the second part a = None while second: a = second.next a = node a = second a = nxt # compare two parts # second part has the same or one less node while node: if node.val != head.val: return False a = node.next a = head.next return True def UpperCAmelCase__ ( UpperCAmelCase__ :str ): '''simple docstring''' if not head or not head.next: return True # 1. Get the midpoint (slow) a = a = a = head while fast and fast.next: a , a = fast.next.next, slow.next # 2. Push the second half into the stack a = [slow.val] while slow.next: a = slow.next stack.append(slow.val ) # 3. Comparison while stack: if stack.pop() != cur.val: return False a = cur.next return True def UpperCAmelCase__ ( UpperCAmelCase__ :Any ): '''simple docstring''' if not head or not head.next: return True a = {} a = 0 while head: if head.val in d: d[head.val].append(UpperCAmelCase__ ) else: a = [pos] a = head.next pos += 1 a = pos - 1 a = 0 for v in d.values(): if len(UpperCAmelCase__ ) % 2 != 0: middle += 1 else: a = 0 for i in range(0 , len(UpperCAmelCase__ ) ): if v[i] + v[len(UpperCAmelCase__ ) - 1 - step] != checksum: return False step += 1 if middle > 1: return False return True
32
0
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: A_ : Tuple = None A_ : Optional[int] = logging.get_logger(__name__) A_ : List[Any] = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''} A_ : Dict = { '''vocab_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''', }, '''tokenizer_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''', }, } A_ : int = { '''google/rembert''': 2_56, } A_ : Optional[int] = '''▁''' class _lowercase ( __A ): _UpperCAmelCase = VOCAB_FILES_NAMES _UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase = RemBertTokenizer def __init__( self : int , __lowerCAmelCase : int=None , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Optional[Any]="[CLS]" , __lowerCAmelCase : Optional[Any]="[SEP]" , __lowerCAmelCase : Dict="<unk>" , __lowerCAmelCase : Optional[Any]="[SEP]" , __lowerCAmelCase : int="<pad>" , __lowerCAmelCase : List[Any]="[CLS]" , __lowerCAmelCase : int="[MASK]" , **__lowerCAmelCase : Union[str, Any] , ) -> str: """simple docstring""" a = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token super().__init__( UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , 
unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , **UpperCamelCase__ , ) a = do_lower_case a = remove_space a = keep_accents a = vocab_file a = False if not self.vocab_file else True def A ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] = None ) -> Optional[int]: """simple docstring""" a = [self.sep_token_id] a = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def A ( self : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] = None , __lowerCAmelCase : Dict = False ) -> Dict: """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1] return [1] + ([0] * len(UpperCamelCase__ )) + [1] def A ( self : Any , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] = None ) -> Tuple: """simple docstring""" a = [self.sep_token_id] a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def A ( self : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : str = None ) -> Optional[Any]: """simple docstring""" if not os.path.isdir(UpperCamelCase__ ): logger.error("Vocabulary path ({}) should be a directory".format(UpperCamelCase__ ) ) return a = os.path.join( UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ): copyfile(self.vocab_file , 
UpperCamelCase__ ) return (out_vocab_file,)
708
import unittest from transformers import MobileBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) class _lowercase : def __init__( self : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : int=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : str=99 , __lowerCAmelCase : List[str]=64 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Dict=5 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Optional[Any]=37 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Union[str, Any]=2 , __lowerCAmelCase : Optional[Any]=0.0_2 , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : Optional[int]=4 , __lowerCAmelCase : Union[str, Any]=None , ) -> List[str]: """simple docstring""" a = parent a = batch_size a = seq_length a = is_training a = use_input_mask a = use_token_type_ids a = use_labels a = vocab_size a = hidden_size a = embedding_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings 
a = type_vocab_size a = type_sequence_label_size a = initializer_range a = num_labels a = num_choices a = scope def A ( self : Optional[int] ) -> Optional[int]: """simple docstring""" a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a = None if self.use_input_mask: a = random_attention_mask([self.batch_size, self.seq_length] ) a = None if self.use_token_type_ids: a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a = None a = None a = None if self.use_labels: a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a = ids_tensor([self.batch_size] , self.num_choices ) a = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A ( self : int ) -> List[str]: """simple docstring""" return MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , ) def A ( self : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict ) -> Union[str, Any]: """simple docstring""" a = MobileBertModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) a = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) a = model(__lowerCAmelCase ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def A ( self : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Any ) -> str: """simple docstring""" a = MobileBertForMaskedLM(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] ) -> List[str]: """simple docstring""" a = MobileBertForNextSentencePrediction(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def A ( self : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ) -> List[Any]: """simple docstring""" a = MobileBertForPreTraining(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , next_sentence_label=__lowerCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, 
self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def A ( self : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ) -> Any: """simple docstring""" a = MobileBertForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A ( self : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] ) -> Optional[int]: """simple docstring""" a = self.num_labels a = MobileBertForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ) -> Optional[Any]: """simple docstring""" a = self.num_labels a = MobileBertForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , 
(self.batch_size, self.seq_length, self.num_labels) ) def A ( self : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] ) -> List[str]: """simple docstring""" a = self.num_choices a = MobileBertForMultipleChoice(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A ( self : List[Any] ) -> Dict: """simple docstring""" a = self.prepare_config_and_inputs() ( ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ) = config_and_inputs a = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ): _UpperCAmelCase = ( ( MobileBertModel, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, ) if is_torch_available() else () ) _UpperCAmelCase = ( { '''feature-extraction''': MobileBertModel, '''fill-mask''': MobileBertForMaskedLM, '''question-answering''': MobileBertForQuestionAnswering, '''text-classification''': MobileBertForSequenceClassification, '''token-classification''': MobileBertForTokenClassification, '''zero-shot''': MobileBertForSequenceClassification, } if is_torch_available() else {} ) _UpperCAmelCase = True def A ( self : 
Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any=False ) -> Any: """simple docstring""" a = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) if return_labels: if model_class in get_values(__lowerCAmelCase ): a = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCAmelCase ) a = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase ) return inputs_dict def A ( self : Optional[int] ) -> List[Any]: """simple docstring""" a = MobileBertModelTester(self ) a = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 ) def A ( self : int ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def A ( self : str ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*__lowerCAmelCase ) def A ( self : str ) -> str: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowerCAmelCase ) def A ( self : List[str] ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowerCAmelCase ) def A ( self : int ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowerCAmelCase ) def A ( self : List[Any] ) -> int: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowerCAmelCase ) def A ( self : List[Any] ) -> Dict: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowerCAmelCase ) def A ( self : List[Any] ) -> Optional[int]: 
"""simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowerCAmelCase ) def A ( self : int ) -> Tuple: """simple docstring""" a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowerCAmelCase ) def UpperCAmelCase__ ( UpperCAmelCase__ :Dict ): '''simple docstring''' return torch.tensor( UpperCAmelCase__ , dtype=torch.long , device=UpperCAmelCase__ , ) A_ : Dict = 1E-3 @require_torch @require_sentencepiece @require_tokenizers class _lowercase ( unittest.TestCase ): @slow def A ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" a = MobileBertModel.from_pretrained("google/mobilebert-uncased" ).to(__lowerCAmelCase ) a = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] ) with torch.no_grad(): a = model(__lowerCAmelCase )[0] a = torch.Size((1, 9, 512) ) self.assertEqual(output.shape , __lowerCAmelCase ) a = torch.tensor( [ [ [-2.4_73_65_26E07, 8.2_69_16_56E04, 1.6_52_18_38E05], [-5.7_54_17_04E-01, 3.9_05_60_22E00, 4.4_01_15_07E00], [2.6_04_73_59E00, 1.5_67_76_52E00, -1.7_32_41_88E-01], ] ] , device=__lowerCAmelCase , ) # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a # ~1 difference, it's therefore not a good idea to measure using addition. # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE a = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE ) a = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE ) self.assertTrue(lower_bound and upper_bound )
32
0
from datasets.utils.patching import _PatchedModuleObj, patch_submodule

from . import _test_patching


def test_patch_submodule():
    """patch_submodule must patch every alias of os.path.join and restore all of them on exit."""
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
        # Every way to access os.path.join must be patched, and the rest must stay untouched

        # check os.path.join
        assert isinstance(_test_patching.os, _PatchedModuleObj)
        assert isinstance(_test_patching.os.path, _PatchedModuleObj)
        assert _test_patching.os.path.join is mock

        # check path.join
        assert isinstance(_test_patching.path, _PatchedModuleObj)
        assert _test_patching.path.join is mock

        # check join
        assert _test_patching.join is mock

        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname

        # Even renamed modules or objects must be patched

        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os, _PatchedModuleObj)
        assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj)
        assert _test_patching.renamed_os.path.join is mock

        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path, _PatchedModuleObj)
        assert _test_patching.renamed_path.join is mock

        # check renamed_join
        assert _test_patching.renamed_join is mock

        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everything is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join


def test_patch_submodule_builtin():
    """Builtins referenced in the module's globals (here ``open``) can be patched too."""
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock

    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open


def test_patch_submodule_missing():
    """Patching a module that isn't imported in the target module must be a no-op, not an error."""
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass


def test_patch_submodule_missing_builtin():
    """A builtin absent from the target module's globals is patched in, then removed again."""
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    # once the patch is over the builtin resolution applies again
    assert _test_patching.len is len


def test_patch_submodule_start_and_stop():
    """The patch object supports explicit start()/stop() besides the context-manager form."""
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open


def test_patch_submodule_successive():
    """Nested patches of sibling attributes must compose in any order and unwind cleanly."""
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename


def test_patch_submodule_doesnt_exist():
    """Patching attributes/modules that don't exist at all must not raise."""
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
709
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class _lowercase(TestCase):
    """Repository-hygiene checks for the dataset scripts under ``./datasets``.

    Every ``open(...)`` call must pass an explicit ``encoding`` (or use a
    binary/write mode), and no script may contain bare ``print(...)`` calls.
    """

    def _no_encoding_on_file_open(self, filepath: str):
        r"""Return a match for the first ``open(...)`` call in *filepath* that
        neither passes an ``encoding`` argument nor uses a binary/write mode,
        or ``None`` when the file is clean.
        """
        with open(filepath, encoding="utf-8") as input_file:
            # The negative lookahead skips call sites whose line also mentions
            # an encoding keyword or a binary/write mode token.
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
        return regexp.search(input_text)

    def _no_print_statements(self, filepath: str):
        r"""Return a match for the first real ``print(`` call in *filepath*, or ``None``.

        Occurrences inside comments, string literals and docstrings are consumed
        by the non-capturing alternatives, so only group(1) hits count.
        """
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
        # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
        matches = [match for match in regexp.finditer(input_text) if match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        """Every dataset script must pass an explicit utf-8 encoding to open()."""
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        """Dataset scripts must use the datasets logger instead of print()."""
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")


# Descriptive public alias for the test case (the obfuscated name is kept for
# backward compatibility).
DatasetScriptTest = _lowercase
32
0
from math import factorial

# Factorial of every decimal digit, keyed by the digit as a string, so a digit
# factorial sum is one dictionary lookup per character of str(number).
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    """Return the sum of the factorials of the decimal digits of *number*.

    Raises:
        TypeError: if *number* is not an int.
        ValueError: if *number* is negative.
    """
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")
    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    """Count starting numbers below *number_limit* whose digit-factorial-sum
    chain contains exactly *chain_length* non-repeating terms
    (Project Euler problem 74).

    Raises:
        TypeError: if either parameter is not an int.
        ValueError: if either parameter is not positive.
    """
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # cache: starting element -> length of its (possibly truncated) chain
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set contains the elements of the current chain.
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating
        # item, or the length already exceeds the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        # Extend by the cached tail length when we stopped on a known element.
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length

        # If the chain contains the exact amount of elements, count it.
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
710
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure: maps each submodule name to the public names it
# provides. `_LazyModule` consumes this to defer the heavy imports.
_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

# The modeling submodule is only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only; at runtime the lazy module
    # below resolves these names on first access.
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )
else:
    import sys

    # Replace this module with the lazy proxy so attribute access triggers the
    # actual submodule import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
32
0
from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING A_ : Dict = logging.get_logger(__name__) @add_end_docstrings(__UpperCAmelCase ) class _lowercase ( __UpperCAmelCase ): def __init__( self : Dict , **__lowerCAmelCase : Any ) -> List[str]: """simple docstring""" super().__init__(**lowerCAmelCase_ ) requires_backends(self , "vision" ) requires_backends(self , "torch" ) if self.framework != "pt": raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" ) self.check_model_type(lowerCAmelCase_ ) def A ( self : str , **__lowerCAmelCase : str ) -> str: """simple docstring""" a = {} a = {} a = {} # preprocess args if "points_per_batch" in kwargs: a = kwargs["points_per_batch"] if "points_per_crop" in kwargs: a = kwargs["points_per_crop"] if "crops_n_layers" in kwargs: a = kwargs["crops_n_layers"] if "crop_overlap_ratio" in kwargs: a = kwargs["crop_overlap_ratio"] if "crop_n_points_downscale_factor" in kwargs: a = kwargs["crop_n_points_downscale_factor"] # postprocess args if "pred_iou_thresh" in kwargs: a = kwargs["pred_iou_thresh"] if "stability_score_offset" in kwargs: a = kwargs["stability_score_offset"] if "mask_threshold" in kwargs: a = kwargs["mask_threshold"] if "stability_score_thresh" in kwargs: a = kwargs["stability_score_thresh"] if "crops_nms_thresh" in kwargs: a = kwargs["crops_nms_thresh"] if "output_rle_mask" in kwargs: a = kwargs["output_rle_mask"] if "output_bboxes_mask" in kwargs: a = kwargs["output_bboxes_mask"] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__( self : Union[str, Any] , __lowerCAmelCase : List[str] , *__lowerCAmelCase : Any , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : 
List[str]=None , **__lowerCAmelCase : str ) -> List[str]: """simple docstring""" return super().__call__(lowerCAmelCase_ , *lowerCAmelCase_ , num_workers=lowerCAmelCase_ , batch_size=lowerCAmelCase_ , **lowerCAmelCase_ ) def A ( self : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any]=64 , __lowerCAmelCase : int = 0 , __lowerCAmelCase : float = 512 / 1500 , __lowerCAmelCase : Optional[int] = 32 , __lowerCAmelCase : Optional[int] = 1 , ) -> Union[str, Any]: """simple docstring""" a = load_image(lowerCAmelCase_ ) a = self.image_processor.size["longest_edge"] a , a , a , a = self.image_processor.generate_crop_boxes( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) a = self.image_processor(images=lowerCAmelCase_ , return_tensors="pt" ) with self.device_placement(): if self.framework == "pt": a = self.get_inference_context() with inference_context(): a = self._ensure_tensor_on_device(lowerCAmelCase_ , device=self.device ) a = self.model.get_image_embeddings(model_inputs.pop("pixel_values" ) ) a = image_embeddings a = grid_points.shape[1] a = points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. 
" "To return all points at once, set points_per_batch to None" ) for i in range(0 , lowerCAmelCase_ , lowerCAmelCase_ ): a = grid_points[:, i : i + points_per_batch, :, :] a = input_labels[:, i : i + points_per_batch] a = i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def A ( self : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any=0.8_8 , __lowerCAmelCase : Tuple=0.9_5 , __lowerCAmelCase : str=0 , __lowerCAmelCase : Dict=1 , ) -> List[Any]: """simple docstring""" a = model_inputs.pop("input_boxes" ) a = model_inputs.pop("is_last" ) a = model_inputs.pop("original_sizes" ).tolist() a = model_inputs.pop("reshaped_input_sizes" ).tolist() a = self.model(**lowerCAmelCase_ ) # post processing happens here in order to avoid CPU GPU copies of ALL the masks a = model_outputs["pred_masks"] a = self.image_processor.post_process_masks( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , binarize=lowerCAmelCase_ ) a = model_outputs["iou_scores"] a , a , a = self.image_processor.filter_masks( masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def A ( self : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any=False , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : Tuple=0.7 , ) -> str: """simple docstring""" a = [] a = [] a = [] for model_output in model_outputs: all_scores.append(model_output.pop("iou_scores" ) ) all_masks.extend(model_output.pop("masks" ) ) all_boxes.append(model_output.pop("boxes" ) ) a = torch.cat(lowerCAmelCase_ ) a = torch.cat(lowerCAmelCase_ ) a , a , a , a = self.image_processor.post_process_for_mask_generation( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) a = defaultdict(lowerCAmelCase_ 
) for output in model_outputs: for k, v in output.items(): extra[k].append(lowerCAmelCase_ ) a = {} if output_rle_mask: a = rle_mask if output_bboxes_mask: a = bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
711
"""Unit tests for `UniPCMultistepScheduler` (diffusers).

Restored from a machine-mangled copy: duplicate parameter names (a
SyntaxError), methods all named ``A`` (each definition clobbered the
previous), and assignments bound to a throwaway ``a`` while later code read
the original identifiers.  Names below are recovered from those use sites.
"""
import tempfile

import torch

from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    DPMSolverSinglestepScheduler,
    UniPCMultistepScheduler,
)

from .test_schedulers import SchedulerCommonTest


class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        """Return a default UniPC config dict, overridable via **kwargs."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """Check that a save/load round-trip of the config preserves step outputs."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            # step through solver_order + 1 timesteps so multistep history is exercised
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Check that forward kwargs survive a save/load round-trip of the scheduler."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        """Run a full 10-step denoising loop and return the final sample.

        Uses the provided `scheduler` if given; otherwise builds a fresh one
        from `config`.  (The mangled original unconditionally rebuilt the
        scheduler afterwards, silently discarding the argument — fixed here so
        `test_switch` actually exercises the round-tripped scheduler.)
        """
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        """`step` must preserve the sample's shape at consecutive timesteps."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        """Round-tripping the config through sibling schedulers must not change results."""
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    sample = self.full_loop(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1014) < 1e-3

    def test_fp16_support(self):
        """A half-precision sample must stay float16 through the loop."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16

    def test_unique_timesteps(self, **config):
        """With as many inference steps as train steps, all timesteps are unique."""
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
32
0
"""Integration tests for the `datasets` inspect API.

Restored from a machine-mangled copy: every test function shared the name
``UpperCAmelCase__`` with duplicated parameter names (a SyntaxError), so the
module could not be imported and pytest would have collected nothing.  The
parameter and local names below are recovered from the use sites; function
names follow pytest's ``test_*`` convention.
"""
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)


# Mark every test in this module as an integration test (hits the Hub).
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    """`inspect_dataset` copies the loading script (and no caches) into the target dir."""
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    """`inspect_metric` (deprecated) copies the metric script into the target dir."""
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    # Multi-config datasets require an explicit config_name.
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
712
"""Tests for the ConvNextV2 PyTorch models (transformers).

Restored from a machine-mangled copy: ``__init__`` assigned every argument
to a throwaway ``a`` while the class read ``self.batch_size`` etc., all
methods were named ``A`` (each clobbering the previous), and signatures
repeated the parameter name ``__lowerCAmelCase`` (a SyntaxError).  Names
below are recovered from the use sites.
"""
import inspect
import unittest

from transformers import ConvNextV2Config
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import ConvNextV2Backbone, ConvNextV2ForImageClassification, ConvNextV2Model
    from transformers.models.convnextv2.modeling_convnextv2 import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class ConvNextV2ModelTester:
    """Builds small ConvNextV2 configs/inputs and runs shape checks for the test class."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextV2Config(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict


@require_torch
class ConvNextV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ConvNextV2 does
    not use input_ids/attention_mask.
    """

    all_model_classes = (
        (
            ConvNextV2Model,
            ConvNextV2ForImageClassification,
            ConvNextV2Backbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextV2Model, "image-classification": ConvNextV2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextV2Config, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # ConvNextV2Config has no text-style common properties to check.
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            # Base/backbone models have no loss head to train.
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
32
0