code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
"""simple docstring""" from pathlib import Path import fire from tqdm import tqdm def lowercase ( A_="ro" , A_="en" , A_="wmt16" , A_=None )-> None: '''simple docstring''' try: import datasets except (ModuleNotFoundError, ImportError): raise ImportError("run pip install datasets" ) a : List[Any] = F'''{src_lang}-{tgt_lang}''' print(F'''Converting {dataset}-{pair}''' ) a : Tuple = datasets.load_dataset(A_ , A_ ) if save_dir is None: a : Dict = F'''{dataset}-{pair}''' a : str = Path(A_ ) save_dir.mkdir(exist_ok=A_ ) for split in ds.keys(): print(F'''Splitting {split} with {ds[split].num_rows} records''' ) # to save to val.source, val.target like summary datasets a : Any = "val" if split == "validation" else split a : Tuple = save_dir.joinpath(F'''{fn}.source''' ) a : Any = save_dir.joinpath(F'''{fn}.target''' ) a : Tuple = src_path.open("w+" ) a : List[Any] = tgt_path.open("w+" ) # reader is the bottleneck so writing one record at a time doesn't slow things down for x in tqdm(ds[split] ): a : Any = x["translation"] src_fp.write(ex[src_lang] + "\n" ) tgt_fp.write(ex[tgt_lang] + "\n" ) print(F'''Saved {dataset} dataset to {save_dir}''' ) if __name__ == "__main__": fire.Fire(download_wmt_dataset)
40
'''simple docstring''' import argparse import torch from torch import nn from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration def __magic_name__ ( __UpperCAmelCase ) -> str: '''simple docstring''' snake_case_ = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''encoder.embed_positions._float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(__UpperCAmelCase, __UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase ) -> Optional[int]: '''simple docstring''' snake_case_ = list(s_dict.keys() ) for key in keys: if "transformer_layers" in key: snake_case_ = s_dict.pop(__UpperCAmelCase ) elif "subsample" in key: snake_case_ = s_dict.pop(__UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase ) -> int: '''simple docstring''' snake_case_ ,snake_case_ = emb.weight.shape snake_case_ = nn.Linear(__UpperCAmelCase, __UpperCAmelCase, bias=__UpperCAmelCase ) snake_case_ = emb.weight.data return lin_layer def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Dict: '''simple docstring''' snake_case_ = torch.load(__UpperCAmelCase, map_location='''cpu''' ) snake_case_ = mam_aaa['''args'''] snake_case_ = mam_aaa['''model'''] snake_case_ = state_dict['''decoder.output_projection.weight'''] remove_ignore_keys_(__UpperCAmelCase ) rename_keys(__UpperCAmelCase ) snake_case_ = state_dict['''decoder.embed_tokens.weight'''].shape[0] snake_case_ = args.share_decoder_input_output_embed snake_case_ = [int(__UpperCAmelCase ) for i in args.conv_kernel_sizes.split(''',''' )] snake_case_ = SpeechaTextConfig( vocab_size=__UpperCAmelCase, max_source_positions=args.max_source_positions, max_target_positions=args.max_target_positions, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, 
decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='''relu''', num_conv_layers=len(__UpperCAmelCase ), conv_channels=args.conv_channels, conv_kernel_sizes=__UpperCAmelCase, input_feat_per_channel=args.input_feat_per_channel, input_channels=args.input_channels, tie_word_embeddings=__UpperCAmelCase, num_beams=5, max_length=200, use_cache=__UpperCAmelCase, decoder_start_token_id=2, early_stopping=__UpperCAmelCase, ) snake_case_ = SpeechaTextForConditionalGeneration(__UpperCAmelCase ) snake_case_ ,snake_case_ = model.model.load_state_dict(__UpperCAmelCase, strict=__UpperCAmelCase ) if len(__UpperCAmelCase ) > 0 and not set(__UpperCAmelCase ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,''' F" but all the following weights are missing {missing}" ) if tie_embeds: snake_case_ = make_linear_from_emb(model.model.decoder.embed_tokens ) else: snake_case_ = lm_head_weights model.save_pretrained(__UpperCAmelCase ) if __name__ == "__main__": a : Any = argparse.ArgumentParser() # Required parameters parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.') parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') a : List[Any] = parser.parse_args() convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
56
0
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer _A : List[Any] =logging.get_logger(__name__) _A : List[str] ={'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} _A : Optional[Any] ={ '''vocab_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''', '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json''' ), }, '''merges_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''', '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt''' ), }, '''tokenizer_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''', '''roberta-large''': 
'''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''', '''roberta-base-openai-detector''': ( '''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json''' ), '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json''' ), }, } _A : Optional[Any] ={ '''roberta-base''': 512, '''roberta-large''': 512, '''roberta-large-mnli''': 512, '''distilroberta-base''': 512, '''roberta-base-openai-detector''': 512, '''roberta-large-openai-detector''': 512, } class _lowercase ( _lowercase ): a = VOCAB_FILES_NAMES a = PRETRAINED_VOCAB_FILES_MAP a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a = ["""input_ids""", """attention_mask"""] a = RobertaTokenizer def __init__( self: List[str] , UpperCamelCase__: Union[str, Any]=None , UpperCamelCase__: Dict=None , UpperCamelCase__: int=None , UpperCamelCase__: List[Any]="replace" , UpperCamelCase__: List[Any]="<s>" , UpperCamelCase__: Optional[Any]="</s>" , UpperCamelCase__: str="</s>" , UpperCamelCase__: List[Any]="<s>" , UpperCamelCase__: Union[str, Any]="<unk>" , UpperCamelCase__: Dict="<pad>" , UpperCamelCase__: Any="<mask>" , UpperCamelCase__: str=False , UpperCamelCase__: List[Any]=True , **UpperCamelCase__: int , ): super().__init__( UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , errors=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ , **UpperCamelCase__ , ) lowerCamelCase__ : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if 
pre_tok_state.get("""add_prefix_space""" , UpperCamelCase__ ) != add_prefix_space: lowerCamelCase__ : int = getattr(UpperCamelCase__ , pre_tok_state.pop("""type""" ) ) lowerCamelCase__ : Dict = add_prefix_space lowerCamelCase__ : Union[str, Any] = pre_tok_class(**UpperCamelCase__ ) lowerCamelCase__ : Any = add_prefix_space lowerCamelCase__ : List[Any] = """post_processor""" lowerCamelCase__ : Optional[Any] = getattr(self.backend_tokenizer , UpperCamelCase__ , UpperCamelCase__ ) if tokenizer_component_instance: lowerCamelCase__ : int = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowerCamelCase__ : Optional[Any] = tuple(state["""sep"""] ) if "cls" in state: lowerCamelCase__ : List[Any] = tuple(state["""cls"""] ) lowerCamelCase__ : int = False if state.get("""add_prefix_space""" , UpperCamelCase__ ) != add_prefix_space: lowerCamelCase__ : Optional[Any] = add_prefix_space lowerCamelCase__ : Any = True if state.get("""trim_offsets""" , UpperCamelCase__ ) != trim_offsets: lowerCamelCase__ : Optional[Any] = trim_offsets lowerCamelCase__ : Tuple = True if changes_to_apply: lowerCamelCase__ : Optional[int] = getattr(UpperCamelCase__ , state.pop("""type""" ) ) lowerCamelCase__ : Any = component_class(**UpperCamelCase__ ) setattr(self.backend_tokenizer , UpperCamelCase__ , UpperCamelCase__ ) @property def lowerCamelCase_ ( self: Union[str, Any] ): if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""" ) return None return str(self._mask_token ) @mask_token.setter def lowerCamelCase_ ( self: str , UpperCamelCase__: int ): lowerCamelCase__ : int = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else value lowerCamelCase__ : str = value def lowerCamelCase_ ( self: Any , *UpperCamelCase__: Optional[int] , **UpperCamelCase__: 
Optional[int] ): lowerCamelCase__ : List[str] = kwargs.get("""is_split_into_words""" , UpperCamelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*UpperCamelCase__ , **UpperCamelCase__ ) def lowerCamelCase_ ( self: Any , *UpperCamelCase__: Any , **UpperCamelCase__: Optional[Any] ): lowerCamelCase__ : int = kwargs.get("""is_split_into_words""" , UpperCamelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*UpperCamelCase__ , **UpperCamelCase__ ) def lowerCamelCase_ ( self: str , UpperCamelCase__: str , UpperCamelCase__: Optional[str] = None ): lowerCamelCase__ : Any = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ ) return tuple(UpperCamelCase__ ) def lowerCamelCase_ ( self: Dict , UpperCamelCase__: Any , UpperCamelCase__: int=None ): lowerCamelCase__ : Tuple = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: List[int] , UpperCamelCase__: Optional[List[int]] = None ): lowerCamelCase__ : Optional[int] = [self.sep_token_id] lowerCamelCase__ : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
41
'''simple docstring''' from ..utils import DummyObject, requires_backends class a ( metaclass=_lowerCamelCase ): snake_case_ = ["transformers", "torch", "note_seq"] def __init__( self : Union[str, Any] , *lowercase_ : Optional[int] , **lowercase_ : int ): requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] ) @classmethod def A_ ( cls : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : str ): requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] ) @classmethod def A_ ( cls : Tuple , *lowercase_ : Union[str, Any] , **lowercase_ : List[Any] ): requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
56
0
'''simple docstring''' import argparse import torch from torch import nn from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration def SCREAMING_SNAKE_CASE__ ( __A ) -> Optional[int]: _snake_case = [ 'encoder.version', 'decoder.version', 'model.encoder.version', 'model.decoder.version', 'decoder.output_projection.weight', '_float_tensor', 'encoder.embed_positions._float_tensor', 'decoder.embed_positions._float_tensor', ] for k in ignore_keys: state_dict.pop(__A , __A ) def SCREAMING_SNAKE_CASE__ ( __A ) -> Dict: _snake_case = list(s_dict.keys() ) for key in keys: if "transformer_layers" in key: _snake_case = s_dict.pop(__A ) elif "subsample" in key: _snake_case = s_dict.pop(__A ) def SCREAMING_SNAKE_CASE__ ( __A ) -> Dict: _snake_case , _snake_case = emb.weight.shape _snake_case = nn.Linear(__A , __A , bias=__A ) _snake_case = emb.weight.data return lin_layer def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> Union[str, Any]: _snake_case = torch.load(__A , map_location='cpu' ) _snake_case = mam_aaa['args'] _snake_case = mam_aaa['model'] _snake_case = state_dict['decoder.output_projection.weight'] remove_ignore_keys_(__A ) rename_keys(__A ) _snake_case = state_dict['decoder.embed_tokens.weight'].shape[0] _snake_case = args.share_decoder_input_output_embed _snake_case = [int(__A ) for i in args.conv_kernel_sizes.split(',' )] _snake_case = SpeechaTextConfig( vocab_size=__A , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , num_conv_layers=len(__A ) , 
conv_channels=args.conv_channels , conv_kernel_sizes=__A , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=__A , num_beams=5 , max_length=200 , use_cache=__A , decoder_start_token_id=2 , early_stopping=__A , ) _snake_case = SpeechaTextForConditionalGeneration(__A ) _snake_case , _snake_case = model.model.load_state_dict(__A , strict=__A ) if len(__A ) > 0 and not set(__A ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( 'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,' F' but all the following weights are missing {missing}' ) if tie_embeds: _snake_case = make_linear_from_emb(model.model.decoder.embed_tokens ) else: _snake_case = lm_head_weights model.save_pretrained(__A ) if __name__ == "__main__": lowercase : str = argparse.ArgumentParser() # Required parameters parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.") parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") lowercase : Any = parser.parse_args() convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
42
'''simple docstring''' # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import doctest import sys import warnings from os.path import abspath, dirname, join import _pytest from transformers.testing_utils import HfDoctestModule, HfDocTestParser # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. a : int = abspath(join(dirname(__file__), 'src')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='ignore', category=FutureWarning) def __magic_name__ ( __UpperCAmelCase ) -> Optional[int]: '''simple docstring''' config.addinivalue_line( '''markers''', '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' ) config.addinivalue_line( '''markers''', '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' ) config.addinivalue_line('''markers''', '''is_pipeline_test: mark test to run only when pipelines are tested''' ) config.addinivalue_line('''markers''', '''is_staging_test: mark test to run only in the staging environment''' ) config.addinivalue_line('''markers''', '''accelerate_tests: mark test that require accelerate''' ) config.addinivalue_line('''markers''', '''tool_tests: mark the tool tests that are run on their specific schedule''' ) def __magic_name__ ( __UpperCAmelCase ) -> int: '''simple docstring''' from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(__UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' from transformers.testing_utils import pytest_terminal_summary_main snake_case_ = terminalreporter.config.getoption('''--make-reports''' ) if make_reports: 
pytest_terminal_summary_main(__UpperCAmelCase, id=__UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Optional[int]: '''simple docstring''' if exitstatus == 5: snake_case_ = 0 # Doctest custom flag to ignore output. a : Union[str, Any] = doctest.register_optionflag('IGNORE_RESULT') a : Optional[int] = doctest.OutputChecker class a ( _lowerCamelCase ): def A_ ( self : List[Any] , lowercase_ : int , lowercase_ : Tuple , lowercase_ : Optional[int] ): if IGNORE_RESULT & optionflags: return True return OutputChecker.check_output(self , lowercase_ , lowercase_ , lowercase_ ) a : List[Any] = CustomOutputChecker a : Optional[int] = HfDoctestModule a : Tuple = HfDocTestParser
56
0
def lowerCamelCase ( SCREAMING_SNAKE_CASE = 100 ): '''simple docstring''' __UpperCamelCase :Any = 0 __UpperCamelCase :int = 0 for i in range(1 , n + 1 ): sum_of_squares += i**2 sum_of_ints += i return sum_of_ints**2 - sum_of_squares if __name__ == "__main__": print(F'{solution() = }')
43
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging a : Dict = logging.get_logger(__name__) a : List[str] = { 'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json', # See all Marian models at https://huggingface.co/models?filter=marian } class a ( _lowerCamelCase ): snake_case_ = "marian" snake_case_ = ["past_key_values"] snake_case_ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self : List[Any] , lowercase_ : Optional[Any]=5_8101 , lowercase_ : Dict=None , lowercase_ : List[str]=1024 , lowercase_ : Optional[Any]=12 , lowercase_ : int=4096 , lowercase_ : Any=16 , lowercase_ : Optional[int]=12 , lowercase_ : str=4096 , lowercase_ : Union[str, Any]=16 , lowercase_ : Dict=0.0 , lowercase_ : Union[str, Any]=0.0 , lowercase_ : Optional[Any]=True , lowercase_ : Union[str, Any]=True , lowercase_ : int="gelu" , lowercase_ : Dict=1024 , lowercase_ : int=0.1 , lowercase_ : Tuple=0.0 , lowercase_ : Tuple=0.0 , lowercase_ : Tuple=0.02 , lowercase_ : int=5_8100 , lowercase_ : Optional[Any]=False , lowercase_ : Any=5_8100 , lowercase_ : Optional[int]=0 , lowercase_ : Dict=0 , lowercase_ : List[str]=True , **lowercase_ : Any , ): snake_case_ = vocab_size snake_case_ = decoder_vocab_size or vocab_size snake_case_ = max_position_embeddings snake_case_ = d_model snake_case_ = encoder_ffn_dim snake_case_ = encoder_layers snake_case_ = encoder_attention_heads snake_case_ = decoder_ffn_dim snake_case_ = decoder_layers snake_case_ = decoder_attention_heads snake_case_ = dropout snake_case_ = attention_dropout snake_case_ = activation_dropout snake_case_ = 
activation_function snake_case_ = init_std snake_case_ = encoder_layerdrop snake_case_ = decoder_layerdrop snake_case_ = use_cache snake_case_ = encoder_layers snake_case_ = scale_embedding # scale factor will be sqrt(d_model) if True snake_case_ = share_encoder_decoder_embeddings super().__init__( pad_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , forced_eos_token_id=lowercase_ , **lowercase_ , ) class a ( _lowerCamelCase ): @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs def A_ ( self : Union[str, Any] ): if self.task in ["default", "seq2seq-lm"]: snake_case_ = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: snake_case_ = {0: '''batch'''} snake_case_ = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: snake_case_ = {0: '''batch''', 1: '''decoder_sequence'''} snake_case_ = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(lowercase_ , direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. 
snake_case_ = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: snake_case_ ,snake_case_ = self.num_layers for i in range(lowercase_ ): snake_case_ = {0: '''batch''', 2: '''past_sequence + sequence'''} snake_case_ = {0: '''batch''', 2: '''past_sequence + sequence'''} else: snake_case_ = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def A_ ( self : Dict ): if self.task in ["default", "seq2seq-lm"]: snake_case_ = super().outputs else: snake_case_ = super(lowercase_ , self ).outputs if self.use_past: snake_case_ ,snake_case_ = self.num_layers for i in range(lowercase_ ): snake_case_ = {0: '''batch''', 2: '''past_sequence + sequence'''} snake_case_ = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def A_ ( self : Dict , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ): snake_case_ = self._generate_dummy_inputs_for_encoder_and_decoder( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) # Generate decoder inputs snake_case_ = seq_length if not self.use_past else 1 snake_case_ = self._generate_dummy_inputs_for_encoder_and_decoder( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) snake_case_ = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} snake_case_ = dict(**lowercase_ , **lowercase_ ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch 
installed.''' ) else: import torch snake_case_ ,snake_case_ = common_inputs['''input_ids'''].shape snake_case_ = common_inputs['''decoder_input_ids'''].shape[1] snake_case_ ,snake_case_ = self.num_attention_heads snake_case_ = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) snake_case_ = decoder_seq_length + 3 snake_case_ = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) snake_case_ = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(lowercase_ , lowercase_ )] , dim=1 ) snake_case_ = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered snake_case_ ,snake_case_ = self.num_layers snake_case_ = min(lowercase_ , lowercase_ ) snake_case_ = max(lowercase_ , lowercase_ ) - min_num_layers snake_case_ = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(lowercase_ ): common_inputs["past_key_values"].append( ( torch.zeros(lowercase_ ), torch.zeros(lowercase_ ), torch.zeros(lowercase_ ), torch.zeros(lowercase_ ), ) ) # TODO: test this. 
snake_case_ = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(lowercase_ , lowercase_ ): common_inputs["past_key_values"].append((torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) ) return common_inputs def A_ ( self : Union[str, Any] , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ): snake_case_ = self._generate_dummy_inputs_for_encoder_and_decoder( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch snake_case_ ,snake_case_ = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values snake_case_ = seqlen + 2 snake_case_ ,snake_case_ = self.num_layers snake_case_ ,snake_case_ = self.num_attention_heads snake_case_ = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) snake_case_ = common_inputs['''attention_mask'''].dtype snake_case_ = torch.cat( [common_inputs['''attention_mask'''], torch.ones(lowercase_ , lowercase_ , dtype=lowercase_ )] , dim=1 ) snake_case_ = [ (torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) for _ in range(lowercase_ ) ] return common_inputs def A_ ( self : List[str] , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX snake_case_ = compute_effective_axis_dimension( lowercase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX snake_case_ = tokenizer.num_special_tokens_to_add(lowercase_ ) snake_case_ = compute_effective_axis_dimension( lowercase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase_ ) # Generate dummy inputs according to compute batch and sequence snake_case_ = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size snake_case_ = dict(tokenizer(lowercase_ , return_tensors=lowercase_ ) ) return common_inputs def A_ ( self : Any , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ): if self.task in ["default", "seq2seq-lm"]: snake_case_ = self._generate_dummy_inputs_for_default_and_seqaseq_lm( lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ ) else: snake_case_ = self._generate_dummy_inputs_for_causal_lm( lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ ) return common_inputs def A_ ( self : Dict , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : int , lowercase_ : List[str] ): if self.task in ["default", "seq2seq-lm"]: snake_case_ = super()._flatten_past_key_values_(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) else: snake_case_ = super(lowercase_ , self )._flatten_past_key_values_( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) @property def A_ ( self : List[str] ): return 1e-4
56
0
"""simple docstring""" from __future__ import annotations import typing from collections import Counter def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> typing.Counter[int]: _lowerCAmelCase : typing.Counter[int] = Counter() for base in range(1 ,max_perimeter + 1 ): for perpendicular in range(_lowerCamelCase ,max_perimeter + 1 ): _lowerCAmelCase : Optional[int] = (base * base + perpendicular * perpendicular) ** 0.5 if hypotenuse == int(_lowerCamelCase ): _lowerCAmelCase : Union[str, Any] = int(base + perpendicular + hypotenuse ) if perimeter > max_perimeter: continue triplets[perimeter] += 1 return triplets def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 1000 ) -> int: _lowerCAmelCase : int = pythagorean_triple(_lowerCamelCase ) return triplets.most_common(1 )[0][0] if __name__ == "__main__": print(F"""Perimeter {solution()} has maximum solutions""")
44
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class a ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): snake_case_ = CycleDiffusionPipeline snake_case_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { "negative_prompt", "height", "width", "negative_prompt_embeds", } snake_case_ = PipelineTesterMixin.required_optional_params - {"latents"} snake_case_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} ) snake_case_ = IMAGE_TO_IMAGE_IMAGE_PARAMS snake_case_ = IMAGE_TO_IMAGE_IMAGE_PARAMS def A_ ( self : Tuple ): torch.manual_seed(0 ) snake_case_ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) snake_case_ = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , num_train_timesteps=1000 , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , ) torch.manual_seed(0 ) snake_case_ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) snake_case_ = 
CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) snake_case_ = CLIPTextModel(lowercase_ ) snake_case_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) snake_case_ = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def A_ ( self : Any , lowercase_ : int , lowercase_ : Optional[Any]=0 ): snake_case_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ ) snake_case_ = image / 2 + 0.5 if str(lowercase_ ).startswith('''mps''' ): snake_case_ = torch.manual_seed(lowercase_ ) else: snake_case_ = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) snake_case_ = { '''prompt''': '''An astronaut riding an elephant''', '''source_prompt''': '''An astronaut riding a horse''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 2, '''eta''': 0.1, '''strength''': 0.8, '''guidance_scale''': 3, '''source_guidance_scale''': 1, '''output_type''': '''numpy''', } return inputs def A_ ( self : Union[str, Any] ): snake_case_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator snake_case_ = self.get_dummy_components() snake_case_ = CycleDiffusionPipeline(**lowercase_ ) snake_case_ = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) snake_case_ = self.get_dummy_inputs(lowercase_ ) snake_case_ = pipe(**lowercase_ ) snake_case_ = output.images snake_case_ = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) snake_case_ = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' ) def A_ ( 
self : Union[str, Any] ): snake_case_ = self.get_dummy_components() for name, module in components.items(): if hasattr(lowercase_ , '''half''' ): snake_case_ = module.half() snake_case_ = CycleDiffusionPipeline(**lowercase_ ) snake_case_ = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) snake_case_ = self.get_dummy_inputs(lowercase_ ) snake_case_ = pipe(**lowercase_ ) snake_case_ = output.images snake_case_ = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) snake_case_ = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def A_ ( self : Optional[int] ): return super().test_save_load_local() @unittest.skip('''non-deterministic pipeline''' ) def A_ ( self : List[Any] ): return super().test_inference_batch_single_identical() @skip_mps def A_ ( self : Union[str, Any] ): return super().test_dict_tuple_outputs_equivalent() @skip_mps def A_ ( self : Union[str, Any] ): return super().test_save_load_optional_components() @skip_mps def A_ ( self : Union[str, Any] ): return super().test_attention_slicing_forward_pass() @slow @require_torch_gpu class a ( unittest.TestCase ): def A_ ( self : List[Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def A_ ( self : Union[str, Any] ): snake_case_ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/cycle-diffusion/black_colored_car.png''' ) snake_case_ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' ) snake_case_ = init_image.resize((512, 512) ) snake_case_ = '''CompVis/stable-diffusion-v1-4''' snake_case_ = DDIMScheduler.from_pretrained(lowercase_ , subfolder='''scheduler''' ) snake_case_ = CycleDiffusionPipeline.from_pretrained( lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ , 
torch_dtype=torch.floataa , revision='''fp16''' ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) pipe.enable_attention_slicing() snake_case_ = '''A black colored car''' snake_case_ = '''A blue colored car''' snake_case_ = torch.manual_seed(0 ) snake_case_ = pipe( prompt=lowercase_ , source_prompt=lowercase_ , image=lowercase_ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase_ , output_type='''np''' , ) snake_case_ = output.images # the values aren't exactly equal, but the images look the same visually assert np.abs(image - expected_image ).max() < 5e-1 def A_ ( self : List[str] ): snake_case_ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/cycle-diffusion/black_colored_car.png''' ) snake_case_ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' ) snake_case_ = init_image.resize((512, 512) ) snake_case_ = '''CompVis/stable-diffusion-v1-4''' snake_case_ = DDIMScheduler.from_pretrained(lowercase_ , subfolder='''scheduler''' ) snake_case_ = CycleDiffusionPipeline.from_pretrained(lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) pipe.enable_attention_slicing() snake_case_ = '''A black colored car''' snake_case_ = '''A blue colored car''' snake_case_ = torch.manual_seed(0 ) snake_case_ = pipe( prompt=lowercase_ , source_prompt=lowercase_ , image=lowercase_ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase_ , output_type='''np''' , ) snake_case_ = output.images assert np.abs(image - expected_image ).max() < 2e-2
56
0
"""Maclaurin-series approximations of sine and cosine."""
from math import factorial, pi


def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) with the first `accuracy` terms of its Maclaurin series.

    Args:
        theta: angle in radians (int or float).
        accuracy: number of series terms to sum; must be a positive int.

    Raises:
        ValueError: if theta is not a real number, or accuracy is not a positive int.
    """
    if not isinstance(theta, (int, float)):
        raise ValueError('maclaurin_sin() requires either an int or float for theta')
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError('maclaurin_sin() requires a positive int for accuracy')
    theta = float(theta)
    # Reduce theta modulo 2*pi so the truncated series stays accurate for large angles.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) with the first `accuracy` terms of its Maclaurin series.

    Args:
        theta: angle in radians (int or float).
        accuracy: number of series terms to sum; must be a positive int.

    Raises:
        ValueError: if theta is not a real number, or accuracy is not a positive int.
    """
    if not isinstance(theta, (int, float)):
        raise ValueError('maclaurin_cos() requires either an int or float for theta')
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError('maclaurin_cos() requires a positive int for accuracy')
    theta = float(theta)
    # Reduce theta modulo 2*pi so the truncated series stays accurate for large angles.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
45
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a : str = logging.get_logger(__name__) a : str = { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json', 'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json', 'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json', # See all BigBird models at https://huggingface.co/models?filter=big_bird } class a ( _lowerCamelCase ): snake_case_ = "big_bird" def __init__( self : Union[str, Any] , lowercase_ : List[Any]=5_0358 , lowercase_ : Tuple=768 , lowercase_ : Dict=12 , lowercase_ : str=12 , lowercase_ : Tuple=3072 , lowercase_ : Any="gelu_new" , lowercase_ : Optional[Any]=0.1 , lowercase_ : List[Any]=0.1 , lowercase_ : List[Any]=4096 , lowercase_ : List[Any]=2 , lowercase_ : List[str]=0.02 , lowercase_ : Optional[int]=1e-12 , lowercase_ : Tuple=True , lowercase_ : Tuple=0 , lowercase_ : str=1 , lowercase_ : Union[str, Any]=2 , lowercase_ : Optional[Any]=66 , lowercase_ : Optional[int]="block_sparse" , lowercase_ : Any=True , lowercase_ : List[str]=False , lowercase_ : Any=64 , lowercase_ : Tuple=3 , lowercase_ : Tuple=None , **lowercase_ : Tuple , ): super().__init__( pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , sep_token_id=lowercase_ , **lowercase_ , ) snake_case_ = vocab_size snake_case_ = max_position_embeddings snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = initializer_range snake_case_ = type_vocab_size snake_case_ = layer_norm_eps snake_case_ = use_cache snake_case_ = 
rescale_embeddings snake_case_ = attention_type snake_case_ = use_bias snake_case_ = block_size snake_case_ = num_random_blocks snake_case_ = classifier_dropout class a ( _lowerCamelCase ): @property def A_ ( self : str ): if self.task == "multiple-choice": snake_case_ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: snake_case_ = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
56
0
"""simple docstring""" import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class lowercase : def __init__( self , lowercase , lowercase=99 , lowercase=13 , lowercase=7 , lowercase=9 , lowercase=True , lowercase=True , lowercase=False , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase=8 , lowercase=0.1 , lowercase=0.002 , lowercase=1 , lowercase=0 , lowercase=0 , lowercase=None , lowercase=None , ) -> Optional[Any]: lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = encoder_seq_length lowerCAmelCase = decoder_seq_length # For common tests lowerCAmelCase = self.decoder_seq_length lowerCAmelCase = is_training lowerCAmelCase = use_attention_mask lowerCAmelCase = use_labels lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = d_ff lowerCAmelCase = relative_attention_num_buckets lowerCAmelCase = dropout_rate lowerCAmelCase = initializer_factor lowerCAmelCase = eos_token_id lowerCAmelCase = pad_token_id lowerCAmelCase = decoder_start_token_id lowerCAmelCase = None lowerCAmelCase = decoder_layers def _snake_case ( self ) -> str: return TaConfig.from_pretrained("""google/umt5-base""" ) def _snake_case ( self , lowercase , lowercase , lowercase , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase=None , ) -> Optional[Any]: if attention_mask is None: lowerCAmelCase = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: lowerCAmelCase = 
decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: lowerCAmelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=lowercase ) if decoder_head_mask is None: lowerCAmelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=lowercase ) if cross_attn_head_mask is None: lowerCAmelCase = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=lowercase ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def _snake_case ( self ) -> int: lowerCAmelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size ) lowerCAmelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input lowerCAmelCase = input_ids.clamp(self.pad_token_id + 1 ) lowerCAmelCase = decoder_input_ids.clamp(self.pad_token_id + 1 ) lowerCAmelCase = self.get_config() lowerCAmelCase = config.num_attention_heads lowerCAmelCase = self.prepare_inputs_dict(lowercase , lowercase , lowercase ) return config, input_dict def _snake_case ( self ) -> int: lowerCAmelCase , lowerCAmelCase = self.prepare_config_and_inputs() return config, inputs_dict def _snake_case ( self ) -> List[str]: return TaConfig( vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // 
self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def _snake_case ( self ) -> Optional[Any]: return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> str: lowerCAmelCase = UMTaModel(config=lowercase ) model.to(lowercase ) model.eval() lowerCAmelCase = model( input_ids=lowercase , decoder_input_ids=lowercase , attention_mask=lowercase , decoder_attention_mask=lowercase , ) lowerCAmelCase = model(input_ids=lowercase , decoder_input_ids=lowercase ) lowerCAmelCase = result.last_hidden_state lowerCAmelCase = result.past_key_values lowerCAmelCase = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(lowercase ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross 
attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Optional[Any]: lowerCAmelCase = UMTaModel(config=lowercase ).get_decoder().to(lowercase ).eval() # first forward pass lowerCAmelCase = model(lowercase , use_cache=lowercase ) lowerCAmelCase = model(lowercase ) lowerCAmelCase = model(lowercase , use_cache=lowercase ) self.parent.assertTrue(len(lowercase ) == len(lowercase ) ) self.parent.assertTrue(len(lowercase ) == len(lowercase ) + 1 ) lowerCAmelCase , lowerCAmelCase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids lowerCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) lowerCAmelCase = model(lowercase )["""last_hidden_state"""] lowerCAmelCase = model(lowercase , past_key_values=lowercase )["""last_hidden_state"""] # select random slice lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowerCAmelCase = output_from_no_past[:, -1, random_slice_idx].detach() lowerCAmelCase = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowercase , lowercase , atol=1e-3 ) ) def _snake_case ( self , lowercase , lowercase , ) -> str: lowerCAmelCase = UMTaModel(config=lowercase ).to(lowercase ).half().eval() lowerCAmelCase = model(**lowercase )["""last_hidden_state"""] self.parent.assertFalse(torch.isnan(lowercase ).any().item() ) @require_torch class lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): _SCREAMING_SNAKE_CASE = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) _SCREAMING_SNAKE_CASE = (UMTaForConditionalGeneration,) if is_torch_available() else () _SCREAMING_SNAKE_CASE = ( { 
'conversational': UMTaForConditionalGeneration, 'feature-extraction': UMTaModel, 'summarization': UMTaForConditionalGeneration, 'text2text-generation': UMTaForConditionalGeneration, 'translation': UMTaForConditionalGeneration, 'question-answering': UMTaForQuestionAnswering, } if is_torch_available() else {} ) _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = True # The small UMT5 model needs higher percentages for CPU/MP tests _SCREAMING_SNAKE_CASE = [0.8, 0.9] def _snake_case ( self ) -> str: lowerCAmelCase = UMTaModelTester(self ) @unittest.skip("""Test has a segmentation fault on torch 1.8.0""" ) def _snake_case ( self ) -> Dict: lowerCAmelCase = self.model_tester.prepare_config_and_inputs() lowerCAmelCase = UMTaModel(config_and_inputs[0] ).to(lowercase ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( lowercase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'{tmpdirname}/t5_test.onnx' , export_params=lowercase , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , ) @unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" ) def _snake_case ( self ) -> Union[str, Any]: lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*lowercase ) def _snake_case ( self ) -> Union[str, Any]: lowerCAmelCase = ["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""] lowerCAmelCase = self.model_tester.prepare_config_and_inputs() lowerCAmelCase = config_and_inputs[0] lowerCAmelCase = UMTaForConditionalGeneration(lowercase ).eval() model.to(lowercase ) lowerCAmelCase = { """head_mask""": torch.zeros(config.num_layers , config.num_heads , device=lowercase ), """decoder_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=lowercase ), """cross_attn_head_mask""": torch.zeros(config.num_decoder_layers 
, config.num_heads , device=lowercase ), } for attn_name, (name, mask) in zip(lowercase , head_masking.items() ): lowerCAmelCase = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": lowerCAmelCase = torch.ones( config.num_decoder_layers , config.num_heads , device=lowercase ) lowerCAmelCase = model.generate( config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=lowercase , return_dict_in_generate=lowercase , **lowercase , ) # We check the state of decoder_attentions and cross_attentions just from the last step lowerCAmelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" ) def _snake_case ( self ) -> Union[str, Any]: pass @require_torch @require_sentencepiece @require_tokenizers class lowercase ( unittest.TestCase ): @slow @unittest.skip( """Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. 
Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" ) def _snake_case ( self ) -> Optional[Any]: lowerCAmelCase = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=lowercase ).to(lowercase ) lowerCAmelCase = AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=lowercase , legacy=lowercase ) lowerCAmelCase = [ """Bonjour monsieur <extra_id_0> bien <extra_id_1>.""", """No se como puedo <extra_id_0>.""", """This is the reason why we <extra_id_0> them.""", """The <extra_id_0> walks in <extra_id_1>, seats""", """A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""", ] lowerCAmelCase = tokenizer(lowercase , return_tensors="""pt""" , padding=lowercase ).input_ids # fmt: off lowerCAmelCase = torch.tensor( [ [ 38_530, 210_703, 256_299, 1_410, 256_298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 826, 321, 671, 25_922, 256_299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 1_460, 339, 312, 19_014, 10_620, 758, 256_299, 2_355,274, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 517, 256_299, 14_869, 281, 301, 256_298, 275, 119_983,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 320, 256_299, 14_869, 281, 2_234, 289, 2_275, 333,61_391, 289, 256_298, 543, 256_297, 168_714, 329, 256_296,274, 1], ] ) # fmt: on torch.testing.assert_allclose(lowercase , lowercase ) lowerCAmelCase = model.generate(input_ids.to(lowercase ) ) lowerCAmelCase = [ """<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""", """<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""", """<pad><extra_id_0> are not going to be a part of the world. 
We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""", """<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""", """<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""", ] lowerCAmelCase = tokenizer.batch_decode(lowercase ) self.assertEqual(lowercase , lowercase )
46
'''simple docstring''' import contextlib import os import sqlitea import pytest from datasets import Dataset, Features, Value from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> str: '''simple docstring''' assert isinstance(__UpperCAmelCase, __UpperCAmelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @require_sqlalchemy @pytest.mark.parametrize('''keep_in_memory''', [False, True] ) def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> int: '''simple docstring''' snake_case_ = tmp_path / '''cache''' snake_case_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): snake_case_ = SqlDatasetReader( '''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=__UpperCAmelCase, keep_in_memory=__UpperCAmelCase ).read() _check_sql_dataset(__UpperCAmelCase, __UpperCAmelCase ) @require_sqlalchemy @pytest.mark.parametrize( '''features''', [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ], ) def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> List[str]: '''simple docstring''' snake_case_ = tmp_path / '''cache''' snake_case_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} 
snake_case_ = features.copy() if features else default_expected_features snake_case_ = ( Features({feature: Value(__UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) snake_case_ = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, features=__UpperCAmelCase, cache_dir=__UpperCAmelCase ).read() _check_sql_dataset(__UpperCAmelCase, __UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase ) -> List[str]: '''simple docstring''' with contextlib.closing(sqlitea.connect(__UpperCAmelCase ) ) as con: snake_case_ = con.cursor() cur.execute('''SELECT * FROM dataset''' ) for row in cur: yield row @require_sqlalchemy def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> List[Any]: '''simple docstring''' snake_case_ = tmp_path / '''cache''' snake_case_ = os.path.join(__UpperCAmelCase, '''tmp.sql''' ) snake_case_ = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=__UpperCAmelCase ).read() SqlDatasetWriter(__UpperCAmelCase, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=1 ).write() snake_case_ = iter_sql_file(__UpperCAmelCase ) snake_case_ = iter_sql_file(__UpperCAmelCase ) for rowa, rowa in zip(__UpperCAmelCase, __UpperCAmelCase ): assert rowa == rowa @require_sqlalchemy def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> Any: '''simple docstring''' snake_case_ = tmp_path / '''cache''' snake_case_ = os.path.join(__UpperCAmelCase, '''tmp.sql''' ) snake_case_ = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=__UpperCAmelCase ).read() SqlDatasetWriter(__UpperCAmelCase, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=2 ).write() snake_case_ = iter_sql_file(__UpperCAmelCase ) snake_case_ = iter_sql_file(__UpperCAmelCase ) for rowa, rowa in zip(__UpperCAmelCase, __UpperCAmelCase ): assert rowa == rowa @require_sqlalchemy def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> 
List[str]: '''simple docstring''' snake_case_ = tmp_path / '''cache''' snake_case_ = os.path.join(__UpperCAmelCase, '''tmp.sql''' ) snake_case_ = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=__UpperCAmelCase ).read() with pytest.raises(__UpperCAmelCase ): SqlDatasetWriter(__UpperCAmelCase, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=0 ).write()
56
0
'''simple docstring''' from typing import List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase : Optional[int] = logging.get_logger(__name__) lowerCamelCase : Tuple = { "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json", } class A__ ( A__ ): A__ = 'autoformer' A__ = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', 'num_hidden_layers': 'encoder_layers', } def __init__( self : Dict , _a : Optional[int] = None , _a : Optional[int] = None , _a : str = "student_t" , _a : str = "nll" , _a : int = 1 , _a : List[int] = [1, 2, 3, 4, 5, 6, 7] , _a : bool = True , _a : int = 0 , _a : int = 0 , _a : int = 0 , _a : int = 0 , _a : Optional[List[int]] = None , _a : Optional[List[int]] = None , _a : int = 64 , _a : int = 2 , _a : int = 2 , _a : int = 2 , _a : int = 2 , _a : int = 32 , _a : int = 32 , _a : str = "gelu" , _a : float = 0.1 , _a : float = 0.1 , _a : float = 0.1 , _a : float = 0.1 , _a : float = 0.1 , _a : int = 100 , _a : float = 0.02 , _a : bool = True , _a : Dict=True , _a : int = 10 , _a : int = 25 , _a : int = 3 , **_a : str , ) -> str: '''simple docstring''' _SCREAMING_SNAKE_CASE =prediction_length _SCREAMING_SNAKE_CASE =context_length if context_length is not None else prediction_length _SCREAMING_SNAKE_CASE =distribution_output _SCREAMING_SNAKE_CASE =loss _SCREAMING_SNAKE_CASE =input_size _SCREAMING_SNAKE_CASE =num_time_features _SCREAMING_SNAKE_CASE =lags_sequence _SCREAMING_SNAKE_CASE =scaling _SCREAMING_SNAKE_CASE =num_dynamic_real_features _SCREAMING_SNAKE_CASE =num_static_real_features _SCREAMING_SNAKE_CASE =num_static_categorical_features if cardinality is not None and num_static_categorical_features > 0: if len(_a ) != num_static_categorical_features: raise ValueError( 'The cardinality should be a list of the same length as `num_static_categorical_features`' ) _SCREAMING_SNAKE_CASE 
=cardinality else: _SCREAMING_SNAKE_CASE =[0] if embedding_dimension is not None and num_static_categorical_features > 0: if len(_a ) != num_static_categorical_features: raise ValueError( 'The embedding dimension should be a list of the same length as `num_static_categorical_features`' ) _SCREAMING_SNAKE_CASE =embedding_dimension else: _SCREAMING_SNAKE_CASE =[min(50 , (cat + 1) // 2 ) for cat in self.cardinality] _SCREAMING_SNAKE_CASE =num_parallel_samples # Transformer architecture configuration _SCREAMING_SNAKE_CASE =input_size * len(self.lags_sequence ) + self._number_of_features _SCREAMING_SNAKE_CASE =d_model _SCREAMING_SNAKE_CASE =encoder_attention_heads _SCREAMING_SNAKE_CASE =decoder_attention_heads _SCREAMING_SNAKE_CASE =encoder_ffn_dim _SCREAMING_SNAKE_CASE =decoder_ffn_dim _SCREAMING_SNAKE_CASE =encoder_layers _SCREAMING_SNAKE_CASE =decoder_layers _SCREAMING_SNAKE_CASE =dropout _SCREAMING_SNAKE_CASE =attention_dropout _SCREAMING_SNAKE_CASE =activation_dropout _SCREAMING_SNAKE_CASE =encoder_layerdrop _SCREAMING_SNAKE_CASE =decoder_layerdrop _SCREAMING_SNAKE_CASE =activation_function _SCREAMING_SNAKE_CASE =init_std _SCREAMING_SNAKE_CASE =use_cache # Autoformer _SCREAMING_SNAKE_CASE =label_length _SCREAMING_SNAKE_CASE =moving_average _SCREAMING_SNAKE_CASE =autocorrelation_factor super().__init__(is_encoder_decoder=_a , **_a ) @property def A ( self : Any ) -> int: '''simple docstring''' return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
47
"""Even Tree: maximum number of edges removable so every remaining component has even size."""
from collections import defaultdict


def dfs(start: int, tree: dict, visited: dict, cuts: list) -> int:
    """Return the number of vertices in the subtree rooted at ``start``.

    Marks each reached node in ``visited`` and appends to ``cuts`` every node
    whose subtree has an even vertex count (the edge above it may be removed).
    """
    size = 1
    visited[start] = True
    for child in tree[start]:
        if child not in visited:
            size += dfs(child, tree, visited, cuts)
    if size % 2 == 0:
        cuts.append(start)
    return size


def even_tree(tree: dict, visited: dict, cuts: list) -> int:
    """Traverse from the root (node 1) and return the number of removable edges.

    The root itself always lands in ``cuts`` (the whole tree has even size),
    but it has no edge above it — hence the ``- 1``.
    """
    dfs(1, tree, visited, cuts)
    return len(cuts) - 1


if __name__ == "__main__":
    number_of_nodes, number_of_edges = 10, 9
    tree = defaultdict(list)
    visited: dict = {}
    cuts: list = []
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    print(even_tree(tree, visited, cuts))
56
0
import warnings
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


# NOTE(review): class/attribute names were obfuscated; this is almost
# certainly the upstream FlavaProcessor.  The class name is kept as-is to
# avoid breaking unseen callers, but the base class, class attributes,
# method names and parameter wiring (all collapsed to a single duplicated
# identifier, which is invalid Python) are restored.
class UpperCamelCase__(ProcessorMixin):
    r"""Processor bundling a FLAVA image processor and a BERT tokenizer into one object."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            # Back-compat: accept the deprecated `feature_extractor` kwarg.
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        """Tokenize ``text`` and/or preprocess ``images``.

        Returns a ``BatchEncoding`` containing tokenizer outputs, image
        processor outputs, or the union of both when both inputs are given.

        Raises:
            ValueError: if neither ``text`` nor ``images`` is provided.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # De-duplicate while preserving order (dict keys keep insertion order).
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
48
"""Root finding via the secant method."""
import math
from collections.abc import Callable


def intersection(function: Callable[[float], float], xa: float, xa2: float) -> float:
    """Approximate a root of ``function`` with the secant method.

    Starts from the two guesses and iterates until two consecutive
    iterates are within 1e-5 of each other.  (The obfuscated original
    compared an iterate with itself — ``abs(x - x) < 1e-5`` — and so
    returned after a single step regardless of convergence.)

    Raises:
        ZeroDivisionError: when the secant is horizontal (equal function
            values or equal abscissae), so the next iterate is undefined.
    """
    x_n = xa
    x_n1 = xa2
    while True:
        if x_n == x_n1 or function(x_n) == function(x_n1):
            raise ZeroDivisionError("float division by zero, could not find root")
        # Secant step: subtract f(x_{n+1}) divided by the secant's slope.
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    """Example polynomial x^3 - 2x - 5; its real root is near 2.0945515."""
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
56
0
import io
import math
from typing import Dict, Optional, Union

import numpy as np
from huggingface_hub import hf_hub_download

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    get_image_size,
    infer_channel_dimension_format,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends


if is_vision_available():
    import textwrap

    from PIL import Image, ImageDraw, ImageFont

if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

logger = logging.get_logger(__name__)

# Hub repo hosting the fallback TTF font used when rendering VQA headers.
DEFAULT_FONT_PATH = "ybelkada/fonts"


def _check_torch_version():
    """Raise if torch < 1.11; the 4D `unfold` used below needs torch>=1.11."""
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )


def torch_extract_patches(image_tensor, patch_height, patch_width):
    """Cut `image_tensor` (C, H, W) into non-overlapping patches.

    Returns a tensor of shape (1, rows, columns, patch_height * patch_width * C).
    """
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(
        image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width)
    )
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)


def render_text(
    text: str,
    text_size: int = 36,
    text_color: str = "black",
    background_color: str = "white",
    left_padding: int = 5,
    right_padding: int = 5,
    top_padding: int = 5,
    bottom_padding: int = 5,
    font_bytes: Optional[bytes] = None,
    font_path: Optional[str] = None,
):
    """Render `text` onto a new PIL image, wrapped at 80 characters."""
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image


def render_header(image: np.ndarray, header: str, **kwargs):
    """Stack a rendered `header` text banner on top of `image` (numpy array in/out)."""
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)
    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)
    return new_image


# NOTE(review): class name was obfuscated to `_A`; it is kept to avoid
# breaking unseen callers.  Method names, however, had all collapsed to a
# single duplicated identifier and are restored from the internal
# `self.extract_flattened_patches` / `self.normalize` call sites.
class _A(BaseImageProcessor):
    r"""Pix2Struct image processor: rescales an image onto a patch grid and
    flattens it into (row_id, col_id, pixels) patch vectors."""

    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2_048,
        is_vqa: bool = False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa

    def extract_flattened_patches(
        self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs
    ) -> np.ndarray:
        """Resize `image` to the best-fitting patch grid and flatten it.

        Returns an array of shape (max_patches, 2 + patch_h * patch_w * C);
        each row is (row_id, col_id, patch pixels), zero-padded to max_patches.
        """
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t. rows * cols <= max_patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)
        return result

    def normalize(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        """Per-image standardization: subtract the image's mean, divide by its
        std (clamped below by 1/sqrt(numel) to avoid division by ~0)."""
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        # Calls the module-level `normalize` imported from image_transforms.
        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        """Preprocess one or more images into flattened patches plus an
        attention mask; for VQA models a `header_text` banner is rendered on top.

        Raises:
            ValueError: on an explicit `data_format` kwarg, invalid image types,
                or a missing `header_text` when `is_vqa` is set.
        """
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")
            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy (a patch row of all zeros is padding)
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
49
"""Convert original DPT checkpoints (intel-isl/DPT) to HuggingFace format."""
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image

from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    """Build a DPTConfig and the expected output shape from the checkpoint URL."""
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        # ADE20k semantic segmentation head.
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape


def remove_ignore_keys_(state_dict):
    """Drop classifier-head keys that the HF model does not use (in place)."""
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(name):
    """Map an original DPT state-dict key to its HuggingFace equivalent."""
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    return name


def read_in_q_k_v(state_dict, config):
    """Split each layer's fused qkv projection into separate q/k/v entries.

    NOTE(review): the target key names were lost in the obfuscated source and
    are reconstructed from the standard DPT/ViT layout — verify against the
    DPT model's parameter names.
    """
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def prepare_img():
    """Download the standard COCO cats test image."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    """Convert, sanity-check, save and optionally upload a DPT checkpoint."""
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        type=str,
        help="URL of the original DPT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    parser.add_argument(
        "--model_name",
        default="dpt-large",
        type=str,
        help="Name of the model, in case you're pushing to the hub.",
    )
    args = parser.parse_args()
    convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
56
0
"""Slow integration test for the MT5 small checkpoint."""
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    from transformers import AutoModelForSeqaSeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        """Check google/mt5-small reproduces a known mesh-tensorflow score.

        The obfuscated original referenced an undefined name in place of
        `return_dict=True` / `torch_device`, and its method name (`A_`)
        was never discovered by unittest; both are restored here.
        """
        model = AutoModelForSeqaSeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        # Convert the mean cross-entropy back to a total log-likelihood,
        # matching the mesh-tensorflow reference score.
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
50
"""Validate Sri Lankan mobile phone numbers."""
import re


def is_sri_lankan_phone_number(phone: str) -> bool:
    """Return True if ``phone`` is a valid Sri Lankan mobile number.

    Accepted forms: trunk/country prefix ``0``, ``94``, ``+94`` or ``0094``,
    then operator code ``7x`` with x in {0,1,2,4,5,6,7,8}, an optional
    ``-`` or space separator, and seven subscriber digits.

    (The obfuscated original passed the same collapsed name as both
    arguments to ``re.search``; the pattern/phone wiring is restored.)
    """
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)"  # trunk / country-code prefix
        r"7(0|1|2|4|5|6|7|8)"  # mobile operator code
        r"(-| |)"  # optional separator
        r"\d{7}$"  # subscriber number
    )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
56
0
from ..utils import is_flax_available, is_torch_available if is_torch_available(): from .autoencoder_kl import AutoencoderKL from .controlnet import ControlNetModel from .dual_transformer_ad import DualTransformeraDModel from .modeling_utils import ModelMixin from .prior_transformer import PriorTransformer from .ta_film_transformer import TaFilmDecoder from .transformer_ad import TransformeraDModel from .unet_ad import UNetaDModel from .unet_ad import UNetaDModel from .unet_ad_condition import UNetaDConditionModel from .unet_ad_condition import UNetaDConditionModel from .vq_model import VQModel if is_flax_available(): from .controlnet_flax import FlaxControlNetModel from .unet_ad_condition_flax import FlaxUNetaDConditionModel from .vae_flax import FlaxAutoencoderKL
51
'''simple docstring''' import re from filelock import FileLock try: import nltk a : Union[str, Any] = True except (ImportError, ModuleNotFoundError): a : Any = False if NLTK_AVAILABLE: with FileLock('.lock') as lock: nltk.download('punkt', quiet=True) def __magic_name__ ( __UpperCAmelCase ) -> str: '''simple docstring''' re.sub('''<n>''', '''''', __UpperCAmelCase ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(__UpperCAmelCase ) )
56
0
import glob
import os
import random
from string import ascii_lowercase, digits

# NOTE(review): the mangled source imported nonexistent module "cva" while
# calling cva.imread/resize/imwrite and cva.IMWRITE_JPEG_QUALITY — OpenCV.
import cv2 as cva
import numpy as np

# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250


def main() -> None:
    """Create NUMBER_IMAGES mosaic-augmented images with YOLO-format labels.

    Reads (image, label) pairs from IMG_DIR/LABEL_DIR, stitches four random
    images into one mosaic, and writes the image plus a .txt label file to
    OUTPUT_DIR.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cva.imwrite(f"{file_root}.jpg", new_image, [cva.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")

        # Convert corner boxes back to YOLO (class, x_center, y_center, w, h).
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Load image paths and their boxes as [class, xmin, ymin, xmax, ymax].

    Label files are YOLO format (normalized center/size); images without any
    box are skipped.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list,
    output_size: tuple,
    scale_range: tuple,
    filter_scale: float = 0.0,
) -> tuple:
    """Stitch four images into one mosaic and remap their annotations.

    Returns (mosaic image, remapped boxes, path of the first source image).
    NOTE(review): the slice-assignment targets placing each resized tile into
    the output canvas were destroyed by the renaming pass; they are
    reconstructed below from the tile geometry — verify against the original.
    """
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uinta if hasattr(np, "uinta") else np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cva.imread(path)
        if i == 0:  # top-left
            img = cva.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cva.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cva.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cva.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]


def random_chars(number_char: int) -> str:
    """Return a random lowercase-alphanumeric string of length *number_char*."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
52
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import structure: maps submodule name -> public names it exports.
# Bug fix: the mangled source assigned each optional export list to a
# throwaway variable `a`, so `_import_structure` passed to _LazyModule below
# was undefined; the dict-key assignments are restored here.
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]

if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
56
0
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazy-import structure: maps submodule name -> public names it exports.
# Bug fix: the mangled source assigned the torch-only export list to a
# throwaway variable, leaving `_import_structure` (used by _LazyModule below)
# undefined; the dict-key assignment is restored here.
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so torch loads on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
53
import re
from typing import Callable, List, Optional, Union

import tensorflow as tf


try:
    from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
    from tensorflow.keras.optimizers import Adam


# NOTE(review): the mangled source collapsed all class names to `a`, all
# method names to `A_`, and duplicated parameter names (a SyntaxError). The
# real names are reconstructed from the in-file call sites (`WarmUp`,
# `AdamWeightDecay`) and the Keras optimizer hook names they must override.
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Linear warmup for `warmup_steps`, then hand off to `decay_schedule_fn`."""

    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }


def create_optimizer(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: Optional[float] = None,
    adam_global_clipnorm: Optional[float] = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: Optional[List[str]] = None,
):
    """Build an optimizer with polynomial decay, optional warmup and optional
    decoupled weight decay.

    Returns (optimizer, lr_schedule) so the LR can be tracked independently.
    """
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule


class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay applied to selected variables."""

    def __init__(
        self,
        learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001,
        beta_1: float = 0.9,
        beta_2: float = 0.999,
        epsilon: float = 1e-7,
        amsgrad: bool = False,
        weight_decay_rate: float = 0.0,
        include_in_weight_decay: Optional[List[str]] = None,
        exclude_from_weight_decay: Optional[List[str]] = None,
        name: str = "AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        # WarmUp must be registered so a serialized schedule can be rebuilt.
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        # NOTE(review): assignment target reconstructed — the mangled source
        # dropped the apply_state dict key this constant is cached under.
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieve the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True


class GradientAccumulator:
    """Accumulates gradients across steps for large effective batch sizes."""

    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps (creates the counter lazily)."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        """Zero the accumulated gradients and the step counter."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
56
0
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path import torch from ...utils import is_npu_available, is_xpu_available from .config_args import ClusterConfig, default_json_config_file from .config_utils import SubcommandHelpFormatter a__ : Tuple = '''Create a default config file for Accelerate with only a few flags set.''' def UpperCAmelCase__ (lowerCAmelCase_="no" , lowerCAmelCase_ = default_json_config_file , lowerCAmelCase_ = False ): '''simple docstring''' __SCREAMING_SNAKE_CASE = Path(lowerCAmelCase_ ) path.parent.mkdir(parents=lowerCAmelCase_ , exist_ok=lowerCAmelCase_ ) if path.exists(): print( f"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" ) return False __SCREAMING_SNAKE_CASE = mixed_precision.lower() if mixed_precision not in ["no", "fp16", "bf16", "fp8"]: raise ValueError( f"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. 
Received {mixed_precision}""" ) __SCREAMING_SNAKE_CASE = { "compute_environment": "LOCAL_MACHINE", "mixed_precision": mixed_precision, } if torch.cuda.is_available(): __SCREAMING_SNAKE_CASE = torch.cuda.device_count() __SCREAMING_SNAKE_CASE = num_gpus __SCREAMING_SNAKE_CASE = False if num_gpus > 1: __SCREAMING_SNAKE_CASE = "MULTI_GPU" else: __SCREAMING_SNAKE_CASE = "NO" elif is_xpu_available() and use_xpu: __SCREAMING_SNAKE_CASE = torch.xpu.device_count() __SCREAMING_SNAKE_CASE = num_xpus __SCREAMING_SNAKE_CASE = False if num_xpus > 1: __SCREAMING_SNAKE_CASE = "MULTI_XPU" else: __SCREAMING_SNAKE_CASE = "NO" elif is_npu_available(): __SCREAMING_SNAKE_CASE = torch.npu.device_count() __SCREAMING_SNAKE_CASE = num_npus __SCREAMING_SNAKE_CASE = False if num_npus > 1: __SCREAMING_SNAKE_CASE = "MULTI_NPU" else: __SCREAMING_SNAKE_CASE = "NO" else: __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = "NO" __SCREAMING_SNAKE_CASE = ClusterConfig(**lowerCAmelCase_ ) config.to_json_file(lowerCAmelCase_ ) return path def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = parser.add_parser("default" , parents=lowerCAmelCase_ , help=lowerCAmelCase_ , formatter_class=lowerCAmelCase_ ) parser.add_argument( "--config_file" , default=lowerCAmelCase_ , help=( "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " "with 'huggingface'." ) , dest="save_location" , ) parser.add_argument( "--mixed_precision" , choices=["no", "fp16", "bf16"] , type=lowerCAmelCase_ , help="Whether or not to use mixed precision training. " "Choose between FP16 and BF16 (bfloat16) training. 
" "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later." , default="no" , ) parser.set_defaults(func=lowerCAmelCase_ ) return parser def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = write_basic_config(args.mixed_precision , args.save_location ) if config_file: print(f"""accelerate configuration saved at {config_file}""" )
54
import gc
import unittest

import torch
from parameterized import parameterized

from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


enable_full_determinism()


# NOTE(review): the mangled source collapsed both base classes to the
# undefined `_lowerCamelCase`, all test-method names to `A_` (so unittest
# discovery found nothing and later defs shadowed earlier ones), and
# duplicated parameter names (a SyntaxError). Names are reconstructed from
# the imported mixins and the behavior of each method body.
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))

    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))


@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()
        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)

    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)

    @parameterized.expand([(13,), (16,), (27,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)

    @parameterized.expand([(13,), (16,), (37,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
56
0
"""MobileNetV1 model configuration (and its ONNX export configuration)."""
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Map of released checkpoints to their config files.
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
a_ = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
}


class snake_case(PretrainedConfig):
    """Configuration for a MobileNetV1 model.

    NOTE(review): the original text inherited from an undefined name `lowercase`;
    `PretrainedConfig` (imported above) is the only plausible base — confirm.
    """

    # `model_type` is the key PretrainedConfig uses to register this config class.
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        """Store the architecture hyper-parameters; rejects non-positive depth multipliers."""
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    """ONNX export settings for MobileNetV1.

    NOTE(review): the original had this class also named `snake_case` (shadowing
    the config above) with all three properties named identically — restored to
    the attribute names the ONNX export machinery looks up.
    """

    # Minimum torch version required to export this architecture to ONNX.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Only the batch axis is dynamic for image inputs.
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        # Tolerance used when validating the exported model against the original.
        return 1e-4
55
"""Flax implementation of the 2D conditional UNet used by Stable Diffusion.

NOTE(review): the original chunk was machine-mangled — every local was named
`snake_case_` (each assignment shadowing the last) and `__call__` had five
parameters all named `lowercase_` (a SyntaxError). Identifiers are restored
from the names the bodies themselves reference; behavior-relevant structure
(block wiring, shapes, dtypes, tolerances) is unchanged.
"""
from typing import Optional, Tuple, Union

import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict

from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
    FlaxCrossAttnDownBlockaD,
    FlaxCrossAttnUpBlockaD,
    FlaxDownBlockaD,
    FlaxUNetMidBlockaDCrossAttn,
    FlaxUpBlockaD,
)


@flax.struct.dataclass
class FlaxUNetaDConditionOutput(BaseOutput):
    # Predicted sample (noise residual) from the UNet forward pass.
    sample: jnp.ndarray


@flax_register_to_config
class a(nn.Module, FlaxModelMixin, ConfigMixin):
    """Conditional 2D UNet: downsampling blocks, a cross-attention mid block,
    and upsampling blocks with skip connections, conditioned on text embeddings."""

    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str, ...] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str, ...] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool, ...]] = False
    block_out_channels: Tuple[int, ...] = (320, 640, 1_280, 1_280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int, ...]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None
    cross_attention_dim: int = 1_280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        """Initialize parameters by tracing the model on zero-valued dummy inputs."""
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]

    def setup(self):
        """Build all sub-modules (flax lazily constructs modules in setup)."""
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via"
                " `num_attention_heads` because of a naming issue as described in"
                " https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing"
                " `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it defaults to `attention_head_dim` — a historical naming mistake kept for
        # backwards compatibility with 40,000+ existing configurations.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        # Broadcast scalar settings to one value per down block.
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=block_out_channels[-1],
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            use_memory_efficient_attention=self.use_memory_efficient_attention,
            dtype=self.dtype,
        )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    num_attention_heads=reversed_num_attention_heads[i],
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                up_block = FlaxUpBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    dtype=self.dtype,
                )

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        down_block_additional_residuals=None,
        mid_block_additional_residual=None,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
        """Run the UNet; returns a dataclass (or a 1-tuple if return_dict=False)."""
        # 1. time — promote a scalar timestep to a 1-D array.
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process — flax convs are NHWC, inputs arrive NCHW.
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlockaD):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # Optional ControlNet-style residuals added to the skip connections.
        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up — consume the stored skip connections from the end.
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlockaD):
                sample = up_block(
                    sample,
                    temb=t_emb,
                    encoder_hidden_states=encoder_hidden_states,
                    res_hidden_states_tuple=res_samples,
                    deterministic=not train,
                )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process — back to NCHW for callers.
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNetaDConditionOutput(sample=sample)
56
0
"""simple docstring""" from __future__ import annotations from collections import deque from collections.abc import Sequence from dataclasses import dataclass from typing import Any @dataclass class _UpperCamelCase : '''simple docstring''' __UpperCAmelCase : int __UpperCAmelCase : Node | None =None __UpperCAmelCase : Node | None =None def _lowerCamelCase ( ): '''simple docstring''' __lowerCAmelCase = Node(1 ) __lowerCAmelCase = Node(2 ) __lowerCAmelCase = Node(3 ) __lowerCAmelCase = Node(4 ) __lowerCAmelCase = Node(5 ) return tree def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' return [root.data, *preorder(root.left ), *preorder(root.right )] if root else [] def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' return postorder(root.left ) + postorder(root.right ) + [root.data] if root else [] def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' return [*inorder(root.left ), root.data, *inorder(root.right )] if root else [] def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0 def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = [] if root is None: return output __lowerCAmelCase = deque([root] ) while process_queue: __lowerCAmelCase = process_queue.popleft() output.append(node.data ) if node.left: process_queue.append(node.left ) if node.right: process_queue.append(node.right ) return output def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = [] def populate_output(_UpperCamelCase , _UpperCamelCase ) -> None: if not root: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.left , level - 1 ) populate_output(root.right , level - 1 ) populate_output(_UpperCamelCase , _UpperCamelCase ) return output def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = [] def populate_output(_UpperCamelCase , 
_UpperCamelCase ) -> None: if root is None: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.right , level - 1 ) populate_output(root.left , level - 1 ) populate_output(_UpperCamelCase , _UpperCamelCase ) return output def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' if root is None: return [] __lowerCAmelCase = [] __lowerCAmelCase = 0 __lowerCAmelCase = height(_UpperCamelCase ) for h in range(1 , height_tree + 1 ): if not flag: output.append(get_nodes_from_left_to_right(_UpperCamelCase , _UpperCamelCase ) ) __lowerCAmelCase = 1 else: output.append(get_nodes_from_right_to_left(_UpperCamelCase , _UpperCamelCase ) ) __lowerCAmelCase = 0 return output def _lowerCamelCase ( ): # Main function for testing. '''simple docstring''' __lowerCAmelCase = make_tree() print(f"In-order Traversal: {inorder(_UpperCamelCase )}" ) print(f"Pre-order Traversal: {preorder(_UpperCamelCase )}" ) print(f"Post-order Traversal: {postorder(_UpperCamelCase )}" , "\n" ) print(f"Height of Tree: {height(_UpperCamelCase )}" , "\n" ) print("Complete Level Order Traversal: " ) print(level_order(_UpperCamelCase ) , "\n" ) print("Level-wise order Traversal: " ) for level in range(1 , height(_UpperCamelCase ) + 1 ): print(f"Level {level}:" , get_nodes_from_left_to_right(_UpperCamelCase , level=_UpperCamelCase ) ) print("\nZigZag order Traversal: " ) print(zigzag(_UpperCamelCase ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
57
"""Mosaic data augmentation: stitch 4 images into one and rewrite YOLO labels.

NOTE(review): the original chunk named every function `__magic_name__` while
`main` called `get_dataset`, `update_image_and_anno` and `random_chars`, bound
all constants to `a` and imported the nonexistent module `cva` — names are
restored from the call sites and the import fixed to `cv2`.
"""
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2
import numpy as np

# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100  # drop boxes smaller than this fraction of the mosaic
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250


def main() -> None:
    """Generate NUMBER_IMAGES mosaics plus matching YOLO-format .txt label files."""
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            # Convert corner coordinates back to YOLO (class, cx, cy, w, h).
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Read YOLO label files and pair them with their images.

    Returns parallel lists: image paths and, per image, a list of
    ``[class_id, xmin, ymin, xmax, ymax]`` boxes in relative coordinates.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            # YOLO stores (cx, cy, w, h); convert to corner coordinates.
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            # Skip images with no usable annotations.
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
) -> tuple[list, list, str]:
    """Stitch the 4 chosen images into one mosaic and remap their boxes.

    The split point is sampled from ``scale_range``; boxes smaller than
    ``filter_scale`` (relative size) after remapping are dropped.
    Returns the mosaic image, the remapped annotations and the first
    source image's path (used to name the output file).
    """
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]


def random_chars(number_char: int) -> str:
    """Return ``number_char`` random lowercase-hex-style characters."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
56
0
"""In-place LSD radix sort for lists of non-negative integers.

NOTE(review): the original body was mangled — it built buckets with
``range(<the input list>)`` (TypeError), appended the whole list instead of
the current element, and wrote sorted values into a throwaway local instead
of back into the list. Logic restored; interface unchanged.
"""
from __future__ import annotations

lowercase_ = 10  # RADIX: number of buckets, one per base-10 digit value


def lowerCamelCase(__lowerCamelCase: list[int]) -> list[int]:
    """Sort the list in place by least-significant-digit radix sort.

    Only non-negative integers are supported (the number of passes is
    driven by ``max()`` of the values). Returns the same, now-sorted list.
    """
    if not __lowerCamelCase:  # max() would raise on an empty list
        return __lowerCamelCase
    placement = 1
    max_digit = max(__lowerCamelCase)
    while placement <= max_digit:
        # declare and initialize empty buckets, one per digit value
        buckets: list[list[int]] = [[] for _ in range(lowercase_)]
        # split the list between the buckets by the current digit
        for value in __lowerCamelCase:
            tmp = int((value / placement) % lowercase_)
            buckets[tmp].append(value)
        # put each bucket's contents back into the list, in bucket order
        a = 0
        for b in range(lowercase_):
            for value in buckets[b]:
                __lowerCamelCase[a] = value
                a += 1
        # move to the next digit
        placement *= lowercase_
    return __lowerCamelCase


if __name__ == "__main__":
    import doctest

    doctest.testmod()
58
"""Pipeline tests for the transformers object-detection pipeline.

NOTE(review): the original chunk was mangled — duplicate `lowercase_`
parameter names (a SyntaxError), all test methods named `A_` (so unittest
would never discover them), and the PIL fallback class named `a` although the
code calls `Image.open`. Names restored; expected values are unchanged.
"""
import unittest

from transformers import (
    MODEL_FOR_OBJECT_DETECTION_MAPPING,
    AutoFeatureExtractor,
    AutoModelForObjectDetection,
    ObjectDetectionPipeline,
    is_vision_available,
    pipeline,
)
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_pytesseract,
    require_tf,
    require_timm,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class Image:
        """Stub so the module imports (and tests skip) when PIL is absent."""

        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_vision
@require_timm
@require_torch
class a(unittest.TestCase):
    # Consumed by the shared pipeline-test mixin to pick candidate models.
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build a pipeline instance plus sample inputs for the common test harness."""
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        """Generic checks: every detection has a score, label and integer box."""
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )

    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        """Tiny random model: fixed (untrained) outputs at threshold 0."""
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_large_model_pt(self):
        """Real DETR checkpoint built from model + feature extractor explicitly."""
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_integration_torch_object_detection(self):
        """Same checkpoint through the high-level pipeline() factory."""
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_threshold(self):
        """A high threshold keeps only the two highest-confidence detections."""
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        """LayoutLM-based detection on a document image (needs pytesseract OCR)."""
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)

        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ],
        )
56
0
import math


def UpperCamelCase(initial_intensity: float, angle: float) -> float:
    """Return the transmitted intensity through a polarizer per Malus's law.

    Malus's law: I = I0 * cos^2(theta), where ``theta`` is the angle (in
    degrees) between the light's initial polarization direction and the
    polarizer's axis.

    The original definition declared the same parameter name twice (a
    ``SyntaxError``) while the body referenced ``initial_intensity`` and
    ``angle``; the parameters now carry the names the body actually uses.

    Args:
        initial_intensity: Intensity of the incident polarized light (>= 0).
        angle: Angle between polarizer axis and polarization, in degrees,
            restricted to the range [0, 360].

    Returns:
        The transmitted intensity ``initial_intensity * cos(angle)**2``.

    Raises:
        ValueError: If ``initial_intensity`` is negative or ``angle`` lies
            outside [0, 360].

    >>> round(UpperCamelCase(100.0, 60.0), 2)
    25.0
    >>> UpperCamelCase(5.0, 0.0)
    5.0
    """
    if initial_intensity < 0:
        # Physical intensity cannot be negative.
        raise ValueError("The value of intensity cannot be negative")
    if angle < 0 or angle > 360:
        # Angle is interpreted in degrees and limited to one full turn.
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)


if __name__ == "__main__":
    import doctest

    # The doctests above give testmod something meaningful to check.
    doctest.testmod(name="malus_law")
59
'''simple docstring''' import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class a : def __init__( self : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Any=13 , lowercase_ : Optional[Any]=7 , lowercase_ : Optional[Any]=True , lowercase_ : Dict=True , lowercase_ : Tuple=False , lowercase_ : Optional[Any]=True , lowercase_ : Any=99 , lowercase_ : Union[str, Any]=64 , lowercase_ : str=5 , lowercase_ : int=4 , lowercase_ : List[Any]=64 , lowercase_ : Dict="gelu" , lowercase_ : Optional[int]=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : Tuple=512 , lowercase_ : List[Any]=16 , lowercase_ : str=2 , lowercase_ : List[str]=0.02 , lowercase_ : Optional[Any]=3 , lowercase_ : Optional[Any]=4 , lowercase_ : List[Any]=None , ): snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_input_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = type_sequence_label_size snake_case_ = initializer_range snake_case_ = num_labels snake_case_ = num_choices snake_case_ = scope def A_ ( self : List[str] ): return MPNetConfig.from_pretrained('''microsoft/mpnet-base''' ) def A_ ( self : str ): 
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_input_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = None snake_case_ = None snake_case_ = None if self.use_labels: snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case_ = ids_tensor([self.batch_size] , self.num_choices ) snake_case_ = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def A_ ( self : Tuple ): return MPNetConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def A_ ( self : Any , lowercase_ : Dict , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Optional[int] ): snake_case_ = MPNetModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() snake_case_ = model(lowercase_ , lowercase_ ) snake_case_ = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def A_ ( self : str , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : str , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : Optional[int] ): snake_case_ = MPNetForQuestionAnswering(config=lowercase_ ) model.to(lowercase_ ) model.eval() snake_case_ = model( lowercase_ , attention_mask=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , ) 
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A_ ( self : Tuple , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Any ): snake_case_ = self.num_labels snake_case_ = MPNetForSequenceClassification(lowercase_ ) model.to(lowercase_ ) model.eval() snake_case_ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A_ ( self : Any , lowercase_ : Any , lowercase_ : str , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : Dict ): snake_case_ = self.num_choices snake_case_ = MPNetForMultipleChoice(config=lowercase_ ) model.to(lowercase_ ) model.eval() snake_case_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case_ = model( lowercase_ , attention_mask=lowercase_ , labels=lowercase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A_ ( self : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : int , lowercase_ : List[str] ): snake_case_ = self.num_labels snake_case_ = MPNetForTokenClassification(config=lowercase_ ) model.to(lowercase_ ) model.eval() snake_case_ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A_ ( self : Union[str, Any] ): snake_case_ = self.prepare_config_and_inputs() ((snake_case_) ,(snake_case_) ,(snake_case_) ,(snake_case_) ,(snake_case_) ,(snake_case_)) = config_and_inputs snake_case_ = {'''input_ids''': input_ids, 
'''attention_mask''': input_mask} return config, inputs_dict @require_torch class a ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): snake_case_ = ( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) if is_torch_available() else () ) snake_case_ = ( { "feature-extraction": MPNetModel, "fill-mask": MPNetForMaskedLM, "question-answering": MPNetForQuestionAnswering, "text-classification": MPNetForSequenceClassification, "token-classification": MPNetForTokenClassification, "zero-shot": MPNetForSequenceClassification, } if is_torch_available() else {} ) snake_case_ = False snake_case_ = True def A_ ( self : Tuple ): snake_case_ = MPNetModelTester(self ) snake_case_ = ConfigTester(self , config_class=lowercase_ , hidden_size=37 ) def A_ ( self : Union[str, Any] ): self.config_tester.run_common_tests() def A_ ( self : Tuple ): snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*lowercase_ ) def A_ ( self : List[Any] ): snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*lowercase_ ) def A_ ( self : List[Any] ): snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*lowercase_ ) def A_ ( self : Union[str, Any] ): snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*lowercase_ ) def A_ ( self : Tuple ): snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*lowercase_ ) @require_torch class a ( unittest.TestCase ): @slow def A_ ( self : List[Any] ): snake_case_ = MPNetModel.from_pretrained('''microsoft/mpnet-base''' ) snake_case_ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) snake_case_ = model(lowercase_ 
)[0] snake_case_ = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , lowercase_ ) snake_case_ = torch.tensor( [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] ) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase_ , atol=1e-4 ) )
56
0
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class snake_case_( a__ , a__ , a__ , unittest.TestCase ): __UpperCamelCase = StableDiffusionInpaintPipeline __UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS __UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS __UpperCamelCase = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess __UpperCamelCase = frozenset([] ) def lowerCamelCase__ ( self : Optional[Any] ): torch.manual_seed(0 ) lowerCAmelCase : Dict = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase_ , ) lowerCAmelCase : List[Any] = PNDMScheduler(skip_prk_steps=UpperCamelCase_ ) torch.manual_seed(0 ) lowerCAmelCase : Union[str, Any] = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) lowerCAmelCase : Union[str, Any] = 
CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , ) lowerCAmelCase : Any = CLIPTextModel(UpperCamelCase_ ) lowerCAmelCase : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) lowerCAmelCase : int = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict=0 ): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched lowerCAmelCase : Any = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ ) lowerCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase : Union[str, Any] = Image.fromarray(np.uinta(UpperCamelCase_ ) ).convert('''RGB''' ).resize((6_4, 6_4) ) lowerCAmelCase : Any = Image.fromarray(np.uinta(image + 4 ) ).convert('''RGB''' ).resize((6_4, 6_4) ) if str(UpperCamelCase_ ).startswith('''mps''' ): lowerCAmelCase : Optional[Any] = torch.manual_seed(UpperCamelCase_ ) else: lowerCAmelCase : Dict = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': init_image, '''mask_image''': mask_image, '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase : Dict = self.get_dummy_components() lowerCAmelCase : Any = 
StableDiffusionInpaintPipeline(**UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = sd_pipe.to(UpperCamelCase_ ) sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ ) lowerCAmelCase : Optional[int] = self.get_dummy_inputs(UpperCamelCase_ ) lowerCAmelCase : Tuple = sd_pipe(**UpperCamelCase_ ).images lowerCAmelCase : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) lowerCAmelCase : Optional[Any] = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCamelCase__ ( self : str ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : List[Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase__ ( self : str ): lowerCAmelCase : Tuple = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-inpaint/init_image.png''' ) lowerCAmelCase : Tuple = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' ) lowerCAmelCase : List[str] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint''' '''/yellow_cat_sitting_on_a_park_bench.npy''' ) lowerCAmelCase : Union[str, Any] = '''stabilityai/stable-diffusion-2-inpainting''' lowerCAmelCase : Tuple = StableDiffusionInpaintPipeline.from_pretrained(UpperCamelCase_ , safety_checker=UpperCamelCase_ ) pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) pipe.enable_attention_slicing() lowerCAmelCase : int = '''Face of a yellow cat, high resolution, sitting on a park bench''' lowerCAmelCase : List[str] = torch.manual_seed(0 ) lowerCAmelCase : int = pipe( prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , 
generator=UpperCamelCase_ , output_type='''np''' , ) lowerCAmelCase : Optional[int] = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 9E-3 def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Optional[Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-inpaint/init_image.png''' ) lowerCAmelCase : str = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' ) lowerCAmelCase : int = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint''' '''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' ) lowerCAmelCase : int = '''stabilityai/stable-diffusion-2-inpainting''' lowerCAmelCase : Any = StableDiffusionInpaintPipeline.from_pretrained( UpperCamelCase_ , torch_dtype=torch.floataa , safety_checker=UpperCamelCase_ , ) pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) pipe.enable_attention_slicing() lowerCAmelCase : str = '''Face of a yellow cat, high resolution, sitting on a park bench''' lowerCAmelCase : Optional[Any] = torch.manual_seed(0 ) lowerCAmelCase : Tuple = pipe( prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type='''np''' , ) lowerCAmelCase : Tuple = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 5E-1 def lowerCamelCase__ ( self : Any ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowerCAmelCase : Tuple = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-inpaint/init_image.png''' ) lowerCAmelCase : Any = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' ) lowerCAmelCase : Union[str, Any] = 
'''stabilityai/stable-diffusion-2-inpainting''' lowerCAmelCase : List[str] = PNDMScheduler.from_pretrained(UpperCamelCase_ , subfolder='''scheduler''' ) lowerCAmelCase : Optional[int] = StableDiffusionInpaintPipeline.from_pretrained( UpperCamelCase_ , safety_checker=UpperCamelCase_ , scheduler=UpperCamelCase_ , torch_dtype=torch.floataa , ) pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() lowerCAmelCase : int = '''Face of a yellow cat, high resolution, sitting on a park bench''' lowerCAmelCase : Tuple = torch.manual_seed(0 ) lowerCAmelCase : Dict = pipe( prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=2 , output_type='''np''' , ) lowerCAmelCase : List[str] = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 1_0**9
60
'''simple docstring''' import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class a ( _lowerCamelCase ): def A_ ( self : str ): snake_case_ = tempfile.mkdtemp() snake_case_ = 8 # DPR tok snake_case_ = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] snake_case_ = os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) os.makedirs(lowercase_ , exist_ok=lowercase_ ) snake_case_ = os.path.join(lowercase_ , DPR_VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) # BART tok snake_case_ = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] 
snake_case_ = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) ) snake_case_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] snake_case_ = {'''unk_token''': '''<unk>'''} snake_case_ = os.path.join(self.tmpdirname , '''bart_tokenizer''' ) os.makedirs(lowercase_ , exist_ok=lowercase_ ) snake_case_ = os.path.join(lowercase_ , BART_VOCAB_FILES_NAMES['''vocab_file'''] ) snake_case_ = os.path.join(lowercase_ , BART_VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(lowercase_ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(lowercase_ ) ) def A_ ( self : Union[str, Any] ): return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def A_ ( self : Union[str, Any] ): return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def A_ ( self : int ): return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) ) def A_ ( self : str ): shutil.rmtree(self.tmpdirname ) def A_ ( self : str ): snake_case_ = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def A_ ( self : str ): snake_case_ = self.get_dummy_dataset() snake_case_ = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset: snake_case_ = dataset snake_case_ = RagRetriever( lowercase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , 
generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def A_ ( self : str , lowercase_ : bool ): snake_case_ = self.get_dummy_dataset() snake_case_ = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , ) if from_disk: snake_case_ = os.path.join(self.tmpdirname , '''dataset''' ) snake_case_ = os.path.join(self.tmpdirname , '''index.faiss''' ) dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) ) dataset.drop_index('''embeddings''' ) dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) ) del dataset snake_case_ = RagRetriever( lowercase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: snake_case_ = RagRetriever( lowercase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , lowercase_ ) , ) return retriever def A_ ( self : Tuple ): snake_case_ = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT ) snake_case_ = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' ) dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' ) pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) ) snake_case_ = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' ) snake_case_ = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset} pickle.dump(lowercase_ , open(lowercase_ , '''wb''' ) ) snake_case_ = RagConfig( 
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , ) snake_case_ = RagRetriever( lowercase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def A_ ( self : Optional[Any] ): snake_case_ = 1 snake_case_ = self.get_dummy_canonical_hf_index_retriever() snake_case_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case_ ,snake_case_ ,snake_case_ = retriever.retrieve(lowercase_ , n_docs=lowercase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(lowercase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , lowercase_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def A_ ( self : str ): snake_case_ = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset: snake_case_ = self.get_dummy_dataset() retriever.save_pretrained(lowercase_ ) snake_case_ = RagRetriever.from_pretrained(lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) snake_case_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case_ = retriever.retrieve(lowercase_ , n_docs=1 ) self.assertTrue(out is not None ) def A_ ( self : int ): snake_case_ = 1 snake_case_ = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ ) snake_case_ = np.array( 
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case_ ,snake_case_ ,snake_case_ = retriever.retrieve(lowercase_ , n_docs=lowercase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(lowercase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , lowercase_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def A_ ( self : int ): snake_case_ = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(lowercase_ ) snake_case_ = RagRetriever.from_pretrained(lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) snake_case_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case_ = retriever.retrieve(lowercase_ , n_docs=1 ) self.assertTrue(out is not None ) def A_ ( self : str ): snake_case_ = 1 snake_case_ = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ ) snake_case_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case_ ,snake_case_ ,snake_case_ = retriever.retrieve(lowercase_ , n_docs=lowercase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(lowercase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , lowercase_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc 
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def A_ ( self : Any ): snake_case_ = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(lowercase_ ) snake_case_ = RagRetriever.from_pretrained(lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) snake_case_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case_ = retriever.retrieve(lowercase_ , n_docs=1 ) self.assertTrue(out is not None ) def A_ ( self : Any ): snake_case_ = 1 snake_case_ = self.get_dummy_legacy_index_retriever() snake_case_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case_ ,snake_case_ ,snake_case_ = retriever.retrieve(lowercase_ , n_docs=lowercase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(lowercase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''text'''] ) , lowercase_ ) self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def A_ ( self : int ): snake_case_ = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(lowercase_ ) snake_case_ = RagRetriever.from_pretrained(lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) snake_case_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case_ = retriever.retrieve(lowercase_ , n_docs=1 ) self.assertTrue(out is not None ) @require_torch 
@require_tokenizers @require_sentencepiece def A_ ( self : List[str] ): import torch snake_case_ = 1 snake_case_ = self.get_dummy_canonical_hf_index_retriever() snake_case_ = [[5, 7], [10, 11]] snake_case_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case_ = retriever(lowercase_ , lowercase_ , prefix=retriever.config.generator.prefix , n_docs=lowercase_ ) snake_case_ ,snake_case_ ,snake_case_ = ( out['''context_input_ids'''], out['''context_attention_mask'''], out['''retrieved_doc_embeds'''], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(lowercase_ , lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) self.assertIsInstance(lowercase_ , np.ndarray ) snake_case_ = retriever( lowercase_ , lowercase_ , prefix=retriever.config.generator.prefix , n_docs=lowercase_ , return_tensors='''pt''' , ) snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ = ( # noqa: F841 out['''context_input_ids'''], out['''context_attention_mask'''], out['''retrieved_doc_embeds'''], out['''doc_ids'''], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(lowercase_ , torch.Tensor ) self.assertIsInstance(lowercase_ , torch.Tensor ) self.assertIsInstance(lowercase_ , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def A_ ( self : Tuple ): snake_case_ = self.get_dpr_ctx_encoder_tokenizer() snake_case_ = 1 snake_case_ = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ ) retriever.set_ctx_encoder_tokenizer(lowercase_ ) snake_case_ = [[5, 7], [10, 11]] snake_case_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case_ = retriever(lowercase_ , lowercase_ , prefix=retriever.config.generator.prefix , n_docs=lowercase_ ) self.assertEqual( len(lowercase_ ) , 6 ) # check whether the retriever output 
consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , lowercase_ ) # check for doc token related keys in dictionary.
56
0
"""simple docstring""" import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class A_ (lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = MobileBertTokenizer SCREAMING_SNAKE_CASE__ : int = MobileBertTokenizerFast SCREAMING_SNAKE_CASE__ : str = True SCREAMING_SNAKE_CASE__ : Optional[Any] = True SCREAMING_SNAKE_CASE__ : Optional[int] = filter_non_english SCREAMING_SNAKE_CASE__ : List[str] = """google/mobilebert-uncased""" def UpperCamelCase__ ( self ): """simple docstring""" super().setUp() UpperCAmelCase_ : Union[str, Any] = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] UpperCAmelCase_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) UpperCAmelCase_ : Any = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = "UNwant\u00E9d,running" UpperCAmelCase_ : Dict = "unwanted, running" return input_text, output_text def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : str = self.tokenizer_class(self.vocab_file ) UpperCAmelCase_ : List[str] = tokenizer.tokenize("UNwant\u00E9d,running" ) self.assertListEqual(lowercase_ , ["un", "##want", "##ed", ",", "runn", "##ing"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ 
) , [9, 6, 7, 12, 10, 11] ) def UpperCamelCase__ ( self ): """simple docstring""" if not self.test_rust_tokenizer: return UpperCAmelCase_ : List[str] = self.get_tokenizer() UpperCAmelCase_ : Any = self.get_rust_tokenizer() UpperCAmelCase_ : Dict = "UNwant\u00E9d,running" UpperCAmelCase_ : Optional[int] = tokenizer.tokenize(lowercase_ ) UpperCAmelCase_ : List[str] = rust_tokenizer.tokenize(lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) UpperCAmelCase_ : List[Any] = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ ) UpperCAmelCase_ : Dict = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) UpperCAmelCase_ : Optional[int] = self.get_rust_tokenizer() UpperCAmelCase_ : str = tokenizer.encode(lowercase_ ) UpperCAmelCase_ : Optional[Any] = rust_tokenizer.encode(lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) # With lower casing UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer(do_lower_case=lowercase_ ) UpperCAmelCase_ : Tuple = self.get_rust_tokenizer(do_lower_case=lowercase_ ) UpperCAmelCase_ : int = "UNwant\u00E9d,running" UpperCAmelCase_ : Dict = tokenizer.tokenize(lowercase_ ) UpperCAmelCase_ : Optional[int] = rust_tokenizer.tokenize(lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) UpperCAmelCase_ : str = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ ) UpperCAmelCase_ : List[Any] = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) UpperCAmelCase_ : str = self.get_rust_tokenizer() UpperCAmelCase_ : Dict = tokenizer.encode(lowercase_ ) UpperCAmelCase_ : Optional[Any] = rust_tokenizer.encode(lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Any = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] ) def UpperCamelCase__ ( self ): """simple 
docstring""" UpperCAmelCase_ : int = BasicTokenizer(do_lower_case=lowercase_ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[str] = BasicTokenizer(do_lower_case=lowercase_ , strip_accents=lowercase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[int] = BasicTokenizer(do_lower_case=lowercase_ , strip_accents=lowercase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = BasicTokenizer(do_lower_case=lowercase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[int] = BasicTokenizer(do_lower_case=lowercase_ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = BasicTokenizer(do_lower_case=lowercase_ , strip_accents=lowercase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : str = BasicTokenizer(do_lower_case=lowercase_ , strip_accents=lowercase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? 
" ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[int] = BasicTokenizer(do_lower_case=lowercase_ , never_split=["[UNK]"] ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] UpperCAmelCase_ : str = {} for i, token in enumerate(lowercase_ ): UpperCAmelCase_ : str = i UpperCAmelCase_ : str = WordpieceTokenizer(vocab=lowercase_ , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] ) self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] ) def UpperCamelCase__ ( self ): """simple docstring""" self.assertTrue(_is_whitespace(" " ) ) self.assertTrue(_is_whitespace("\t" ) ) self.assertTrue(_is_whitespace("\r" ) ) self.assertTrue(_is_whitespace("\n" ) ) self.assertTrue(_is_whitespace("\u00A0" ) ) self.assertFalse(_is_whitespace("A" ) ) self.assertFalse(_is_whitespace("-" ) ) def UpperCamelCase__ ( self ): """simple docstring""" self.assertTrue(_is_control("\u0005" ) ) self.assertFalse(_is_control("A" ) ) self.assertFalse(_is_control(" " ) ) self.assertFalse(_is_control("\t" ) ) self.assertFalse(_is_control("\r" ) ) def UpperCamelCase__ ( self ): """simple docstring""" self.assertTrue(_is_punctuation("-" ) ) self.assertTrue(_is_punctuation("$" ) ) self.assertTrue(_is_punctuation("`" ) ) self.assertTrue(_is_punctuation("." 
) ) self.assertFalse(_is_punctuation("A" ) ) self.assertFalse(_is_punctuation(" " ) ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = self.get_tokenizer() UpperCAmelCase_ : List[str] = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(lowercase_ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) self.assertListEqual( [rust_tokenizer.tokenize(lowercase_ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) @slow def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Optional[int] = self.tokenizer_class.from_pretrained("google/mobilebert-uncased" ) UpperCAmelCase_ : str = tokenizer.encode("sequence builders" , add_special_tokens=lowercase_ ) UpperCAmelCase_ : Dict = tokenizer.encode("multi-sequence build" , add_special_tokens=lowercase_ ) UpperCAmelCase_ : List[str] = tokenizer.build_inputs_with_special_tokens(lowercase_ ) UpperCAmelCase_ : List[Any] = tokenizer.build_inputs_with_special_tokens(lowercase_ , lowercase_ ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def UpperCamelCase__ ( self ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): UpperCAmelCase_ : List[str] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ ) UpperCAmelCase_ : Optional[int] = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence.""" UpperCAmelCase_ : Optional[int] = tokenizer_r.encode_plus( lowercase_ , return_attention_mask=lowercase_ , return_token_type_ids=lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ , ) UpperCAmelCase_ : Tuple = tokenizer_r.do_lower_case if hasattr(lowercase_ , "do_lower_case" ) else False UpperCAmelCase_ : Any = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 
1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "Allen"), ((21, 23), "##NL"), ((23, 24), "##P"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "allen"), ((21, 23), "##nl"), ((23, 24), "##p"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Dict = ["的", "人", "有"] UpperCAmelCase_ : List[str] = "".join(lowercase_ ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): UpperCAmelCase_ : Any = True UpperCAmelCase_ : List[Any] = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ ) UpperCAmelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ ) UpperCAmelCase_ : Optional[Any] = tokenizer_p.encode(lowercase_ , add_special_tokens=lowercase_ ) UpperCAmelCase_ : List[str] = tokenizer_r.encode(lowercase_ , add_special_tokens=lowercase_ ) UpperCAmelCase_ : Optional[int] = tokenizer_r.convert_ids_to_tokens(lowercase_ ) UpperCAmelCase_ : List[str] = tokenizer_p.convert_ids_to_tokens(lowercase_ ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(lowercase_ , lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) UpperCAmelCase_ : Optional[int] = False UpperCAmelCase_ : List[Any] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ ) UpperCAmelCase_ : str = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ ) UpperCAmelCase_ : 
int = tokenizer_r.encode(lowercase_ , add_special_tokens=lowercase_ ) UpperCAmelCase_ : Optional[int] = tokenizer_p.encode(lowercase_ , add_special_tokens=lowercase_ ) UpperCAmelCase_ : str = tokenizer_r.convert_ids_to_tokens(lowercase_ ) UpperCAmelCase_ : str = tokenizer_p.convert_ids_to_tokens(lowercase_ ) # it is expected that only the first Chinese character is not preceded by "##". UpperCAmelCase_ : List[str] = [ F"""##{token}""" if idx != 0 else token for idx, token in enumerate(lowercase_ ) ] self.assertListEqual(lowercase_ , lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ )
61
"""Fast T5 tokenizer, backed by HuggingFace *tokenizers* (spiece.model / tokenizer.json)."""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_ta import TaTokenizer
else:
    # Without sentencepiece the slow tokenizer cannot be constructed.
    TaTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
        't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
        't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
        't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
        't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
    },
    'tokenizer_file': {
        't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
        't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
        't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
        't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
        't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
    },
}


# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    't5-small': 512,
    't5-base': 512,
    't5-large': 512,
    't5-3b': 512,
    't5-11b': 512,
}


class TaTokenizerFast(PreTrainedTokenizerFast):
    """Fast T5 tokenizer.

    Adds ``extra_ids`` sentinel tokens (``<extra_id_0>`` ... ``<extra_id_{n-1}>``)
    and appends ``</s>`` (EOS) when building model inputs.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = TaTokenizer

    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token='</s>',
        unk_token='<unk>',
        pad_token='<pad>',
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool('extra_id_' in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    ' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
                    ' tokens'
                )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # Saving a slow vocab requires the original sentencepiece model file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self.extra_ids = extra_ids

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        """Keep the (deprecated) 512 max length for known checkpoints, warning about it."""
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    'This tokenizer was incorrectly instantiated with a model max length of'
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    ' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
                    ' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    ' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
                    ' instantiate this tokenizer with `model_max_length` set to your preferred value.',
                    FutureWarning,
                )

        return max_model_length

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece vocab file into ``save_directory``."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.'
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_a: List[int], token_ids_a2: Optional[List[int]] = None
    ) -> List[int]:
        """Append EOS to each sequence: ``X </s>`` or ``A </s> B </s>``."""
        token_ids_a = token_ids_a + [self.eos_token_id]
        if token_ids_a2 is None:
            return self.prefix_tokens + token_ids_a
        else:
            token_ids_a2 = token_ids_a2 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_a2

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a2: Optional[List[int]] = None
    ) -> List[int]:
        """T5 does not use token type ids: return all zeros (EOS included)."""
        eos = [self.eos_token_id]

        if token_ids_a2 is None:
            return len(token_ids_a + eos) * [0]
        return len(token_ids_a + eos + token_ids_a2 + eos) * [0]

    def get_sentinel_tokens(self):
        """Return the ``<extra_id_N>`` sentinel tokens among the additional special tokens."""
        # Fixed: the original compared ``bool(...) is not None`` which is always True,
        # making the filter a no-op.
        return list(
            set(filter(lambda x: bool(re.search(R'<extra_id_\d+>', x)), self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        """Return the ids of the sentinel tokens."""
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
56
0
"""Tests for SamProcessor (PyTorch, TensorFlow, and PT/TF equivalence)."""

import shutil
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import (
    is_pt_tf_cross_test,
    require_tf,
    require_torch,
    require_torchvision,
    require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, SamImageProcessor, SamProcessor

if is_torch_available():
    import torch

if is_tf_available():
    import tensorflow as tf


@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    """PyTorch-side SamProcessor tests.

    NOTE(review): the original file declared all three test classes (and all
    methods within each) under one shared obfuscated name, so only the last
    definition survived; unique names restore test discovery.
    """

    def setUp(self):
        # Save a default processor into a temp dir so it can be reloaded by name.
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (channels moved last)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        input_feat_extract.pop('original_sizes')  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('reshaped_input_sizes')  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)

    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [torch.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # A malformed mask list must raise.
        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))


@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    """TensorFlow-side SamProcessor tests."""

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (channels moved last)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        input_feat_extract.pop('original_sizes')  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('reshaped_input_sizes')  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [tf.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors='tf')
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors='tf',
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors='tf'
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors='tf'
            )


@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    """Cross-framework (PT vs TF) equivalence tests for SamProcessor."""

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (channels moved last)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors='tf'
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors='pt'
        )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        pt_input_feat_extract = image_processor(image_input, return_tensors='pt')['pixel_values'].numpy()
        pt_input_processor = processor(images=image_input, return_tensors='pt')['pixel_values'].numpy()

        tf_input_feat_extract = image_processor(image_input, return_tensors='tf')['pixel_values'].numpy()
        tf_input_processor = processor(images=image_input, return_tensors='tf')['pixel_values'].numpy()

        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor))
62
'''simple docstring''' from __future__ import annotations import math def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> int: '''simple docstring''' if depth < 0: raise ValueError('''Depth cannot be less than 0''' ) if len(__UpperCAmelCase ) == 0: raise ValueError('''Scores cannot be empty''' ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1, node_index * 2, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ), minimax(depth + 1, node_index * 2 + 1, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ), ) return min( minimax(depth + 1, node_index * 2, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ), minimax(depth + 1, node_index * 2 + 1, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ), ) def __magic_name__ ( ) -> None: '''simple docstring''' snake_case_ = [90, 23, 6, 33, 21, 65, 123, 3_4423] snake_case_ = math.log(len(__UpperCAmelCase ), 2 ) print('''Optimal value : ''', end='''''' ) print(minimax(0, 0, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
56
0
"""Fine-tune a timm `resnet50d` classifier head on the Oxford-IIIT Pets images with 🤗 Accelerate.

Labels are derived from the image filenames (``<breed>_<idx>.jpg``). Supports mixed
precision, experiment tracking, and step/epoch checkpointing with resume.

NOTE(review): this file had been identifier-mangled (all functions named
``_lowerCamelCase``, all locals ``_a``) while call sites still used the real names
(``extract_label``, ``PetsDataset``, ``training_function``, ``main``), so it raised
``NameError`` on first use. Names below are restored to match the call sites.
"""
import argparse
import os
import re

import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor

from accelerate import Accelerator


def extract_label(fname: str) -> str:
    """Extract the breed label from a filename shaped like ``<breed>_<number>.jpg``."""
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]


class PetsDataset(Dataset):
    """Dataset of pet images; labels come from the filenames via `extract_label`.

    Args:
        file_names: full paths to the ``.jpg`` files.
        image_transform: optional torchvision transform applied to each RGB image.
        label_to_id: optional mapping from string label to integer id.
    """

    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}


def training_function(config, args):
    """Run the full train/eval loop. `config` holds hyper-parameters, `args` the CLI options."""
    # Initialize accelerator (with trackers only when tracking is requested)
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]

    # Build the label correspondences (sorted so ids are deterministic)
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Split our filenames between train (80%) and validation (20%)
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset([file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id)

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Freezing the base model; only the classifier head is trained.
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)

    # Instantiate optimizer (base lr is lr/25; OneCycle ramps up to `lr`)
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)

    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        # NOTE(review): the outer `if` guarantees a truthy value, so the `else` branch
        # (pick most recent checkpoint) is currently unreachable — kept as in upstream.
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]

        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)

    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()


def main():
    """Parse CLI arguments and launch the training function."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)


if __name__ == "__main__":
    main()
63
"""Convert a fairseq Speech2Text checkpoint to the Hugging Face format.

NOTE(review): this file had been identifier-mangled (four functions all named
``__magic_name__``, every local ``snake_case_``), so ``ignore_keys``, ``state_dict``,
``s_dict`` and the final ``convert_fairseq_sat_checkpoint_to_tfms`` call were all
undefined names. The identifiers below are restored to match the call sites.
"""
import argparse

import torch
from torch import nn


def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping entries (in place) that have no HF equivalent."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        # `None` default: missing keys are simply skipped.
        state_dict.pop(k, None)


def rename_keys(s_dict):
    """Rename fairseq parameter names (in place) to the HF naming scheme."""
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    """Build a bias-free ``nn.Linear`` that shares its weight with an embedding (weight tying)."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_sat_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Load a fairseq checkpoint, map it onto a HF Speech2Text model and save it.

    Args:
        checkpoint_path: path to the fairseq ``.pt`` file.
        pytorch_dump_folder_path: output directory for ``save_pretrained``.

    Raises:
        ValueError: if weights other than the positional-embedding buffers are missing.
    """
    # Imported lazily so the pure-torch helpers above stay usable without transformers.
    from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration

    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    args = mam_aaa["args"]
    state_dict = mam_aaa["model"]
    # Saved before remove_ignore_keys_ deletes it from the state dict.
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = SpeechaTextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = SpeechaTextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    # Only the (recomputable) sinusoidal position buffers may legitimately be missing.
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
56
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging A_ = logging.get_logger(__name__) A_ = { '''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/config.json''', # See all XGLM models at https://huggingface.co/models?filter=xglm } class lowercase( __a ): '''simple docstring''' lowercase__ = "xglm" lowercase__ = ["past_key_values"] lowercase__ = { "num_attention_heads": "attention_heads", "hidden_size": "d_model", "num_hidden_layers": "num_layers", } def __init__( self: Dict, a_: int=256_008, a_: List[str]=2_048, a_: Dict=1_024, a_: int=4_096, a_: List[Any]=24, a_: Any=16, a_: Dict="gelu", a_: Optional[Any]=0.1, a_: str=0.1, a_: Union[str, Any]=0.0, a_: List[str]=0.0, a_: List[Any]=0.02, a_: Dict=True, a_: int=True, a_: List[Any]=2, a_: str=1, a_: Optional[int]=0, a_: Tuple=2, **a_: Tuple, ): '''simple docstring''' _snake_case : Union[str, Any] = vocab_size _snake_case : Optional[int] = max_position_embeddings _snake_case : Union[str, Any] = d_model _snake_case : Optional[int] = ffn_dim _snake_case : List[Any] = num_layers _snake_case : int = attention_heads _snake_case : int = activation_function _snake_case : List[str] = dropout _snake_case : List[Any] = attention_dropout _snake_case : Any = activation_dropout _snake_case : Union[str, Any] = layerdrop _snake_case : int = init_std _snake_case : Optional[Any] = scale_embedding # scale factor will be sqrt(d_model) if True _snake_case : Union[str, Any] = use_cache super().__init__( pad_token_id=a_, bos_token_id=a_, eos_token_id=a_, decoder_start_token_id=a_, **a_, )
64
'''simple docstring''' from ..utils import DummyObject, requires_backends class a ( metaclass=_lowerCamelCase ): snake_case_ = ["transformers", "torch", "note_seq"] def __init__( self : Union[str, Any] , *lowercase_ : Optional[int] , **lowercase_ : int ): requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] ) @classmethod def A_ ( cls : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : str ): requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] ) @classmethod def A_ ( cls : Tuple , *lowercase_ : Union[str, Any] , **lowercase_ : List[Any] ): requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
56
0
from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class A : def __init__(self : List[str] , __UpperCAmelCase : Optional[Any] , ) -> List[Any]: """simple docstring""" UpperCAmelCase__ = parent UpperCAmelCase__ = 1_3 UpperCAmelCase__ = 7 UpperCAmelCase__ = True UpperCAmelCase__ = True UpperCAmelCase__ = True UpperCAmelCase__ = 9_9 UpperCAmelCase__ = 3_2 UpperCAmelCase__ = 2 UpperCAmelCase__ = 4 UpperCAmelCase__ = 3_7 UpperCAmelCase__ = "gelu" UpperCAmelCase__ = 0.1 UpperCAmelCase__ = 0.1 UpperCAmelCase__ = 5_1_2 UpperCAmelCase__ = 1_6 UpperCAmelCase__ = 2 UpperCAmelCase__ = 0.02 UpperCAmelCase__ = 3 UpperCAmelCase__ = 4 UpperCAmelCase__ = None def lowercase_ (self : Optional[Any] ) -> str: """simple docstring""" UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase__ = None if self.use_input_mask: UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase__ = None UpperCAmelCase__ = None UpperCAmelCase__ = None if self.use_labels: UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase__ = EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowercase_ (self : Union[str, Any] ) -> Tuple: """simple docstring""" ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) = self.prepare_config_and_inputs() UpperCAmelCase__ = True UpperCAmelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def lowercase_ (self : Any , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : int ) -> str: """simple docstring""" UpperCAmelCase__ = TFEsmModel(config=__UpperCAmelCase ) UpperCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask} UpperCAmelCase__ = model(__UpperCAmelCase ) UpperCAmelCase__ = [input_ids, input_mask] UpperCAmelCase__ = model(__UpperCAmelCase ) UpperCAmelCase__ = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase_ (self : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Any , __UpperCAmelCase : Tuple , ) -> List[str]: 
"""simple docstring""" UpperCAmelCase__ = True UpperCAmelCase__ = TFEsmModel(config=__UpperCAmelCase ) UpperCAmelCase__ = { "input_ids": input_ids, "attention_mask": input_mask, "encoder_hidden_states": encoder_hidden_states, "encoder_attention_mask": encoder_attention_mask, } UpperCAmelCase__ = model(__UpperCAmelCase ) UpperCAmelCase__ = [input_ids, input_mask] UpperCAmelCase__ = model(__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase ) # Also check the case where encoder outputs are not passed UpperCAmelCase__ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase_ (self : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple , __UpperCAmelCase : int ) -> Optional[Any]: """simple docstring""" UpperCAmelCase__ = TFEsmForMaskedLM(config=__UpperCAmelCase ) UpperCAmelCase__ = model([input_ids, input_mask] ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase_ (self : Any , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[Any] ) -> int: """simple docstring""" UpperCAmelCase__ = self.num_labels UpperCAmelCase__ = TFEsmForTokenClassification(config=__UpperCAmelCase ) UpperCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask} UpperCAmelCase__ = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowercase_ (self : List[str] ) -> List[Any]: """simple docstring""" UpperCAmelCase__ = self.prepare_config_and_inputs() ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) 
, ) = config_and_inputs UpperCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class A ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): __UpperCAmelCase : List[str] = ( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) __UpperCAmelCase : Optional[int] = ( { 'feature-extraction': TFEsmModel, 'fill-mask': TFEsmForMaskedLM, 'text-classification': TFEsmForSequenceClassification, 'token-classification': TFEsmForTokenClassification, 'zero-shot': TFEsmForSequenceClassification, } if is_tf_available() else {} ) __UpperCAmelCase : str = False __UpperCAmelCase : Union[str, Any] = False def lowercase_ (self : Dict ) -> Optional[Any]: """simple docstring""" UpperCAmelCase__ = TFEsmModelTester(self ) UpperCAmelCase__ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=3_7 ) def lowercase_ (self : List[Any] ) -> int: """simple docstring""" self.config_tester.run_common_tests() def lowercase_ (self : str ) -> str: """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowercase_ (self : List[Any] ) -> int: """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*__UpperCAmelCase ) def lowercase_ (self : Dict ) -> Tuple: """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def lowercase_ (self : List[str] ) -> int: """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase ) @slow def lowercase_ (self : List[Any] ) -> str: """simple docstring""" for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ = 
TFEsmModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @unittest.skip("Protein models do not support embedding resizing." ) def lowercase_ (self : Optional[int] ) -> Union[str, Any]: """simple docstring""" pass @unittest.skip("Protein models do not support embedding resizing." ) def lowercase_ (self : Dict ) -> str: """simple docstring""" pass def lowercase_ (self : Tuple ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ = model_class(__UpperCAmelCase ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer UpperCAmelCase__ = model.get_bias() assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) for k, v in name.items(): assert isinstance(__UpperCAmelCase , tf.Variable ) else: UpperCAmelCase__ = model.get_output_embeddings() assert x is None UpperCAmelCase__ = model.get_bias() assert name is None @require_tf class A ( unittest.TestCase ): @slow def lowercase_ (self : Tuple ) -> Optional[int]: """simple docstring""" UpperCAmelCase__ = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" ) UpperCAmelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] ) UpperCAmelCase__ = model(__UpperCAmelCase )[0] UpperCAmelCase__ = [1, 6, 3_3] self.assertEqual(list(output.numpy().shape ) , __UpperCAmelCase ) # compare the actual values for a slice. 
UpperCAmelCase__ = tf.constant( [ [ [8.921518, -10.589814, -6.4671307], [-6.3967156, -13.911377, -1.1211915], [-7.781247, -13.951557, -3.740592], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) ) @slow def lowercase_ (self : Optional[Any] ) -> Optional[int]: """simple docstring""" UpperCAmelCase__ = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" ) UpperCAmelCase__ = tf.constant([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] ) UpperCAmelCase__ = model(__UpperCAmelCase )[0] # compare the actual values for a slice. UpperCAmelCase__ = tf.constant( [ [ [0.14443092, 0.54125327, 0.3247739], [0.30340484, 0.00526676, 0.31077722], [0.32278043, -0.24987096, 0.3414628], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
65
"""Pytest configuration shared by the whole test suite.

NOTE(review): this file had been identifier-mangled — all four pytest hooks were
named ``__magic_name__``. Pytest dispatches hooks *by function name*, so none of
them would ever have been invoked; the canonical hook names are restored below.
"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    """Register the custom markers used across the test suite."""
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    """Add the shared transformers CLI options (e.g. --make-reports)."""
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """Emit the extended report files when --make-reports was requested."""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    """Treat 'no tests collected' (exit code 5) as success."""
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    """OutputChecker that accepts any output when IGNORE_RESULT is set on the example."""

    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
56
0
"""Tests for AlignProcessor (BERT tokenizer + EfficientNet image processor).

NOTE(review): this file had been identifier-mangled — every method of the test
class was named ``lowerCAmelCase_``, so each definition shadowed the previous and
pytest could not collect a single ``test_*`` method. Method names are restored to
the conventional processor-test layout implied by the method bodies.
"""
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AlignProcessor, EfficientNetImageProcessor


@require_vision
class lowerCamelCase(unittest.TestCase):
    def setUp(self):
        # Write a minimal BERT vocab and an image-processor config into a temp dir
        # so both components can be loaded with from_pretrained.
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (HWC, uint8)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
66
"""Marian model configuration (de-obfuscated).

NOTE(review): in the obfuscated dump the logger, the archive map and BOTH classes
were all named ``a`` (shadowing each other), every ``__init__`` parameter was named
``lowercase_`` (a SyntaxError), every attribute assignment was collapsed to
``snake_case_ = ...`` and every method was named ``A_``. Names below are restored
from the call sites the fragment itself contains
(``self._generate_dummy_inputs_for_encoder_and_decoder``,
``self.fill_with_past_key_values_``, ...) and PretrainedConfig conventions.
"""
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging


logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}


class MarianConfig(PretrainedConfig):
    """Configuration class for a Marian encoder-decoder translation model."""

    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # decoder may have its own vocabulary; falls back to the shared one
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class MarianOnnxConfig(OnnxSeqaSeqConfigWithPast):
    """ONNX export configuration for Marian seq2seq models."""

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                # with a cache, only one new decoder token is fed per step
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seqaseq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build encoder + decoder dummy inputs (and past_key_values if enabled)."""
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build decoder-only dummy inputs (and past_key_values if enabled)."""
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Dispatch to the seq2seq or causal-lm dummy-input generator by task."""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeqaSeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
56
0
"""0/1 knapsack: memoized recursion, bottom-up DP, and optimal-subset recovery.

De-obfuscated: all four functions were named ``__lowerCAmelCase`` (so earlier
definitions were shadowed) while the ``__main__`` block called them by their real
names — those call sites fix the names restored here.
"""


def mf_knapsack(i, wt, val, j):
    """Memoized recursive knapsack over the first ``i`` items with capacity ``j``.

    Uses the module-level memo table ``f`` (``f[i][j] < 0`` means "not computed").
    """
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            best = mf_knapsack(i - 1, wt, val, j)
        else:
            best = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = best
    return f[i][j]


def knapsack(w, wt, val, n):
    """Bottom-up DP knapsack.

    Returns ``(optimal_value, dp_table)`` for capacity ``w``, weights ``wt``,
    values ``val`` and ``n`` items.
    """
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    # FIX: index with w explicitly instead of the leaked loop variable w_,
    # which is undefined when w == 0 (the loop body never runs).
    return dp[n][w], dp


def knapsack_with_example_solution(w, wt, val):
    """Solve the knapsack and also recover one optimal subset of item indices.

    Raises ValueError for non-sequence inputs or mismatched lengths, and
    TypeError for non-integer weights.
    """
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError("Both the weights and values vectors must be either lists or tuples")

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    """Walk the DP table backwards, adding chosen item indices to ``optimal_set``."""
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
67
"""CycleDiffusion pipeline tests (de-obfuscated).

NOTE(review): both test classes were named ``a``, every method ``A_`` and every
local ``snake_case_``; argument placeholders ``lowercase_`` were undefined. Names
and values below are restored from the fragment's own imports, dict keys and
literal constants; the boolean/None values for the undefined placeholders follow
the standard diffusers test conventions — TODO confirm against the original file.
"""
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build tiny model components so the pipeline runs fast on CPU."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1000,
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline call kwargs for the given device."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()


@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))
        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images
        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))
        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images
        assert np.abs(image - expected_image).max() < 2e-2
56
0
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple

import numpy as np

from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

# NOTE(review): in the obfuscated dump the logger, all three module constants and
# the loader function were ALL named ``lowerCAmelCase__`` (shadowing each other),
# while the class body references them by their real names (VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES,
# load_vocab_and_emoji, logger) — those references fix the names restored here.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}


def load_vocab_and_emoji(vocab_file, emoji_file):
    """Load a vocabulary file and an emoji file into lookup dictionaries.

    Returns ``(vocab, raw_vocab, ids_to_tokens, emoji)`` where ``raw_vocab`` maps
    only the first spelling of each line to its id while ``vocab`` maps every
    comma-separated spelling variant to the same id.
    """
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    # a line is either a single token or a comma-separated list of variants;
    # a literal "," token must not be split
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[b[0]] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji


class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    """Tokenizer for GPT-NeoX-Japanese, backed by a sub-word Japanese tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )

    @property
    def vocab_size(self):
        # raw_vocab counts each vocabulary line once (variants excluded)
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation) -> List[int]:
        """Concatenate a conversation's turns, EOS-terminated, truncated from the left."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file


class SubWordJapaneseTokenizer(object):
    """Greedy longest-match sub-word tokenizer with Japanese-specific cleaning."""

    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        # box-drawing / block characters are collapsed into a single <BLOCK> token
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        """Replace URLs, e-mails, phone numbers, dates and prices with tag tokens."""
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content

    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("\u3000", "<SP>")  # full-width (ideographic) space
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            # single character whose UTF-8 encoding is 2 bytes within known symbol ranges
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            # single character whose UTF-8 encoding is 3 bytes in U+2000..U+2BFF
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            # special tokens start with "<" and may be up to maxlen long
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    # fall back to raw byte tokens
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result

    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
68
"""BigBird model configuration (de-obfuscated).

NOTE(review): in the obfuscated dump the logger, the archive map and BOTH classes
were named ``a`` (shadowing each other), every ``__init__`` parameter was named
``lowercase_`` (a SyntaxError) and every attribute assignment was collapsed to
``snake_case_ = ...``; names below are restored from the right-hand sides the
fragment itself contains and from PretrainedConfig conventions.
"""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}


class BigBirdConfig(PretrainedConfig):
    """Configuration class for a BigBird (sparse-attention) model."""

    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        # "block_sparse" enables BigBird's sparse attention; "original_full" is dense
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    """ONNX export configuration for BigBird."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
56
0
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "facebook/bart-large-mnli" SCREAMING_SNAKE_CASE_ = ( "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which " "should be the text to classify, and `labels`, which should be the list of labels to use for classification. " "It returns the most likely label in the list of provided `labels` for the input text." 
) SCREAMING_SNAKE_CASE_ = "text_classifier" SCREAMING_SNAKE_CASE_ = AutoTokenizer SCREAMING_SNAKE_CASE_ = AutoModelForSequenceClassification SCREAMING_SNAKE_CASE_ = ["text", ["text"]] SCREAMING_SNAKE_CASE_ = ["text"] def a_ ( self) -> Dict: super().setup() snake_case_ = self.model.config snake_case_ = -1 for idx, label in config.idalabel.items(): if label.lower().startswith('entail'): snake_case_ = int(lowerCAmelCase__) if self.entailment_id == -1: raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.') def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> List[Any]: snake_case_ = labels return self.pre_processor( [text] * len(lowerCAmelCase__), [f'This example is {label}' for label in labels], return_tensors='pt', padding='max_length', ) def a_ ( self, lowerCAmelCase__) -> Tuple: snake_case_ = outputs.logits snake_case_ = torch.argmax(logits[:, 2]).item() return self._labels[label_id]
69
"""Tests for SQL dataset I/O (restored: all five functions were named
``__magic_name__`` so each redefinition shadowed the previous one, and the
stdlib module was digit-mangled to ``sqlitea``)."""
import contextlib
import os
import sqlite3

import pytest

from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy


def _check_sql_dataset(dataset, expected_features):
    """Shared assertions: shape, column names and per-column dtypes."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)


def iter_sql_file(sqlite_path):
    """Yield every row of the `dataset` table from a SQLite file."""
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    # num_proc=0 is invalid and must raise rather than silently run.
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
56
0
"""Video image processor (restored: the dump rebound a single local
``_lowerCAmelCase`` for every ``self.<attr> = ...`` assignment, so the
processor stored no configuration at all)."""
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import logging


if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    """Coerce a single image, a list of frames, or a batch of videos into
    the canonical ``list[video][frame]`` nesting."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos  # already batched

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]  # a single video (list of frames)

    elif is_valid_image(videos):
        return [[videos]]  # a single frame

    raise ValueError(f"Could not make batched video from {videos}")


class VivitImageProcessor(BaseImageProcessor):
    """Resize / crop / rescale / normalize videos frame by frame.

    ``offset=True`` shifts rescaled pixel values so they are centered around
    zero (see ``rescale``); it requires ``do_rescale=True``.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize a frame; ``size`` gives either a shortest edge or an exact box."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop a frame to an exact height/width."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Rescale pixel values; with ``offset`` the values are first shifted
        by ``scale / 2`` so the output is centered around zero."""
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Channel-wise normalize a frame with the given mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        """Apply the configured transformations to a single frame."""
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess one or more videos into a ``BatchFeature`` of pixel values."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
70
'''simple docstring''' from collections import defaultdict def __magic_name__ ( __UpperCAmelCase ) -> int: '''simple docstring''' snake_case_ = 1 snake_case_ = True for v in tree[start]: if v not in visited: ret += dfs(__UpperCAmelCase ) if ret % 2 == 0: cuts.append(__UpperCAmelCase ) return ret def __magic_name__ ( ) -> Union[str, Any]: '''simple docstring''' dfs(1 ) if __name__ == "__main__": a ,a : Dict = 10, 9 a : Dict = defaultdict(list) a : dict[int, bool] = {} a : list[int] = [] a : Tuple = 0 a : str = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)] for u, v in edges: tree[u].append(v) tree[v].append(u) even_tree() print(len(cuts) - 1)
56
0
"""Convert a T5X Pix2Struct checkpoint to the Hugging Face format.

Restored from a mangled dump: every function was named ``A``, the rename
mapping dicts lost their names (so ``CONVERSION_MAPPING`` was undefined),
and digits in identifiers were garbled (``tax`` -> ``t5x``,
``PixaStruct`` -> ``Pix2Struct``).  Heavy optional dependencies (t5x, flax,
transformers) are imported lazily so the pure renaming helper stays
importable without them.
"""
import argparse
import os
import re

import torch


def get_flax_param(t5x_checkpoint_path):
    """Load a T5X checkpoint and return its parameters as a flat dict."""
    from flax.traverse_util import flatten_dict
    from t5x import checkpoints

    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    return flatten_dict(flax_params)


def rename_and_convert_flax_params(flax_dict):
    """Rename T5X parameter keys to the HF naming scheme and convert to torch.

    Keys not under the ``target`` prefix are dropped.  Weight matrices are
    transposed (flax stores them transposed relative to torch) except for
    embedding tables.
    """
    converted_dict = {}
    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")

            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict


def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    """Build a Pix2Struct HF model from a T5X checkpoint and save model + processor."""
    from transformers import (
        AutoTokenizer,
        Pix2StructConfig,
        Pix2StructForConditionalGeneration,
        Pix2StructImageProcessor,
        Pix2StructProcessor,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )

    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        processor.image_processor.max_patches = 4096

    processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
    args = parser.parse_args()

    # Pass is_vqa through as well (the original dropped the flag on the floor).
    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large, args.is_vqa
    )
71
'''simple docstring''' import math from collections.abc import Callable def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> float: '''simple docstring''' snake_case_ = xa snake_case_ = xa while True: if x_n == x_na or function(__UpperCAmelCase ) == function(__UpperCAmelCase ): raise ZeroDivisionError('''float division by zero, could not find root''' ) snake_case_ = x_na - ( function(__UpperCAmelCase ) / ((function(__UpperCAmelCase ) - function(__UpperCAmelCase )) / (x_na - x_n)) ) if abs(x_na - x_na ) < 10**-5: return x_na snake_case_ = x_na snake_case_ = x_na def __magic_name__ ( __UpperCAmelCase ) -> float: '''simple docstring''' return math.pow(__UpperCAmelCase, 3 ) - (2 * x) - 5 if __name__ == "__main__": print(intersection(f, 3, 3.5))
56
0
"""simple docstring""" import argparse from collections import defaultdict import yaml lowerCAmelCase__ = '''docs/source/en/_toctree.yml''' def snake_case_ ( A_ : str ): '''simple docstring''' _lowerCamelCase : List[Any] = defaultdict(A_ ) _lowerCamelCase : List[Any] = [] _lowerCamelCase : Tuple = [] for doc in doc_list: if "local" in doc: counts[doc["local"]] += 1 if doc["title"].lower() == "overview": overview_doc.append({'''local''': doc['''local'''], '''title''': doc['''title''']} ) else: new_doc_list.append(A_ ) _lowerCamelCase : Optional[Any] = new_doc_list _lowerCamelCase : Tuple = [key for key, value in counts.items() if value > 1] _lowerCamelCase : List[str] = [] for duplicate_key in duplicates: _lowerCamelCase : Optional[int] = list({doc['''title'''] for doc in doc_list if doc['''local'''] == duplicate_key} ) if len(A_ ) > 1: raise ValueError( F'''{duplicate_key} is present several times in the documentation table of content at ''' '''`docs/source/en/_toctree.yml` with different *Title* values. 
Choose one of those and remove the ''' '''others.''' ) # Only add this once new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in doc_list if '''local''' not in counts or counts[doc['''local''']] == 1] ) _lowerCamelCase : Optional[int] = sorted(A_, key=lambda A_ : s["title"].lower() ) # "overview" gets special treatment and is always first if len(A_ ) > 1: raise ValueError('''{doc_list} has two \'overview\' docs which is not allowed.''' ) overview_doc.extend(A_ ) # Sort return overview_doc def snake_case_ ( A_ : str=False ): '''simple docstring''' with open(A_, encoding='''utf-8''' ) as f: _lowerCamelCase : Union[str, Any] = yaml.safe_load(f.read() ) # Get to the API doc _lowerCamelCase : Union[str, Any] = 0 while content[api_idx]["title"] != "API": api_idx += 1 _lowerCamelCase : List[Any] = content[api_idx]['''sections'''] # Then to the model doc _lowerCamelCase : str = 0 while api_doc[scheduler_idx]["title"] != "Schedulers": scheduler_idx += 1 _lowerCamelCase : List[str] = api_doc[scheduler_idx]['''sections'''] _lowerCamelCase : Dict = clean_doc_toc(A_ ) _lowerCamelCase : Tuple = False if new_scheduler_doc != scheduler_doc: _lowerCamelCase : List[str] = True if overwrite: _lowerCamelCase : str = new_scheduler_doc if diff: if overwrite: _lowerCamelCase : List[Any] = api_doc with open(A_, '''w''', encoding='''utf-8''' ) as f: f.write(yaml.dump(A_, allow_unicode=A_ ) ) else: raise ValueError( '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' ) def snake_case_ ( A_ : Any=False ): '''simple docstring''' with open(A_, encoding='''utf-8''' ) as f: _lowerCamelCase : Dict = yaml.safe_load(f.read() ) # Get to the API doc _lowerCamelCase : Optional[int] = 0 while content[api_idx]["title"] != "API": api_idx += 1 _lowerCamelCase : Union[str, Any] = content[api_idx]['''sections'''] # Then to the model doc _lowerCamelCase : Union[str, Any] = 0 while 
api_doc[pipeline_idx]["title"] != "Pipelines": pipeline_idx += 1 _lowerCamelCase : str = False _lowerCamelCase : Tuple = api_doc[pipeline_idx]['''sections'''] _lowerCamelCase : Any = [] # sort sub pipeline docs for pipeline_doc in pipeline_docs: if "section" in pipeline_doc: _lowerCamelCase : List[str] = pipeline_doc['''section'''] _lowerCamelCase : Optional[Any] = clean_doc_toc(A_ ) if overwrite: _lowerCamelCase : List[str] = new_sub_pipeline_doc new_pipeline_docs.append(A_ ) # sort overall pipeline doc _lowerCamelCase : Any = clean_doc_toc(A_ ) if new_pipeline_docs != pipeline_docs: _lowerCamelCase : Dict = True if overwrite: _lowerCamelCase : Optional[int] = new_pipeline_docs if diff: if overwrite: _lowerCamelCase : Union[str, Any] = api_doc with open(A_, '''w''', encoding='''utf-8''' ) as f: f.write(yaml.dump(A_, allow_unicode=A_ ) ) else: raise ValueError( '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') lowerCAmelCase__ = parser.parse_args() check_scheduler_doc(args.fix_and_overwrite) check_pipeline_doc(args.fix_and_overwrite)
72
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() a : Any = logging.get_logger(__name__) def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' snake_case_ = DPTConfig() if "large" in checkpoint_url: snake_case_ = 1024 snake_case_ = 4096 snake_case_ = 24 snake_case_ = 16 snake_case_ = [5, 11, 17, 23] snake_case_ = [256, 512, 1024, 1024] snake_case_ = (1, 384, 384) if "ade" in checkpoint_url: snake_case_ = True snake_case_ = 150 snake_case_ = '''huggingface/label-files''' snake_case_ = '''ade20k-id2label.json''' snake_case_ = json.load(open(cached_download(hf_hub_url(__UpperCAmelCase, __UpperCAmelCase, repo_type='''dataset''' ) ), '''r''' ) ) snake_case_ = {int(__UpperCAmelCase ): v for k, v in idalabel.items()} snake_case_ = idalabel snake_case_ = {v: k for k, v in idalabel.items()} snake_case_ = [1, 150, 480, 480] return config, expected_shape def __magic_name__ ( __UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' snake_case_ = ['''pretrained.model.head.weight''', '''pretrained.model.head.bias'''] for k in ignore_keys: state_dict.pop(__UpperCAmelCase, __UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase ) -> List[Any]: '''simple docstring''' if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): snake_case_ = name.replace('''pretrained.model''', '''dpt.encoder''' ) if "pretrained.model" in name: snake_case_ = name.replace('''pretrained.model''', '''dpt.embeddings''' ) if "patch_embed" in name: snake_case_ = name.replace('''patch_embed''', '''patch_embeddings''' ) if "pos_embed" in name: snake_case_ = name.replace('''pos_embed''', '''position_embeddings''' ) 
if "attn.proj" in name: snake_case_ = name.replace('''attn.proj''', '''attention.output.dense''' ) if "proj" in name and "project" not in name: snake_case_ = name.replace('''proj''', '''projection''' ) if "blocks" in name: snake_case_ = name.replace('''blocks''', '''layer''' ) if "mlp.fc1" in name: snake_case_ = name.replace('''mlp.fc1''', '''intermediate.dense''' ) if "mlp.fc2" in name: snake_case_ = name.replace('''mlp.fc2''', '''output.dense''' ) if "norm1" in name: snake_case_ = name.replace('''norm1''', '''layernorm_before''' ) if "norm2" in name: snake_case_ = name.replace('''norm2''', '''layernorm_after''' ) if "scratch.output_conv" in name: snake_case_ = name.replace('''scratch.output_conv''', '''head''' ) if "scratch" in name: snake_case_ = name.replace('''scratch''', '''neck''' ) if "layer1_rn" in name: snake_case_ = name.replace('''layer1_rn''', '''convs.0''' ) if "layer2_rn" in name: snake_case_ = name.replace('''layer2_rn''', '''convs.1''' ) if "layer3_rn" in name: snake_case_ = name.replace('''layer3_rn''', '''convs.2''' ) if "layer4_rn" in name: snake_case_ = name.replace('''layer4_rn''', '''convs.3''' ) if "refinenet" in name: snake_case_ = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 snake_case_ = name.replace(F"refinenet{layer_idx}", F"fusion_stage.layers.{abs(layer_idx-4 )}" ) if "out_conv" in name: snake_case_ = name.replace('''out_conv''', '''projection''' ) if "resConfUnit1" in name: snake_case_ = name.replace('''resConfUnit1''', '''residual_layer1''' ) if "resConfUnit2" in name: snake_case_ = name.replace('''resConfUnit2''', '''residual_layer2''' ) if "conv1" in name: snake_case_ = name.replace('''conv1''', '''convolution1''' ) if "conv2" in name: snake_case_ = name.replace('''conv2''', '''convolution2''' ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: snake_case_ = name.replace('''pretrained.act_postprocess1.0.project.0''', 
'''neck.reassemble_stage.readout_projects.0.0''' ) if "pretrained.act_postprocess2.0.project.0" in name: snake_case_ = name.replace('''pretrained.act_postprocess2.0.project.0''', '''neck.reassemble_stage.readout_projects.1.0''' ) if "pretrained.act_postprocess3.0.project.0" in name: snake_case_ = name.replace('''pretrained.act_postprocess3.0.project.0''', '''neck.reassemble_stage.readout_projects.2.0''' ) if "pretrained.act_postprocess4.0.project.0" in name: snake_case_ = name.replace('''pretrained.act_postprocess4.0.project.0''', '''neck.reassemble_stage.readout_projects.3.0''' ) # resize blocks if "pretrained.act_postprocess1.3" in name: snake_case_ = name.replace('''pretrained.act_postprocess1.3''', '''neck.reassemble_stage.layers.0.projection''' ) if "pretrained.act_postprocess1.4" in name: snake_case_ = name.replace('''pretrained.act_postprocess1.4''', '''neck.reassemble_stage.layers.0.resize''' ) if "pretrained.act_postprocess2.3" in name: snake_case_ = name.replace('''pretrained.act_postprocess2.3''', '''neck.reassemble_stage.layers.1.projection''' ) if "pretrained.act_postprocess2.4" in name: snake_case_ = name.replace('''pretrained.act_postprocess2.4''', '''neck.reassemble_stage.layers.1.resize''' ) if "pretrained.act_postprocess3.3" in name: snake_case_ = name.replace('''pretrained.act_postprocess3.3''', '''neck.reassemble_stage.layers.2.projection''' ) if "pretrained.act_postprocess4.3" in name: snake_case_ = name.replace('''pretrained.act_postprocess4.3''', '''neck.reassemble_stage.layers.3.projection''' ) if "pretrained.act_postprocess4.4" in name: snake_case_ = name.replace('''pretrained.act_postprocess4.4''', '''neck.reassemble_stage.layers.3.resize''' ) if "pretrained" in name: snake_case_ = name.replace('''pretrained''', '''dpt''' ) if "bn" in name: snake_case_ = name.replace('''bn''', '''batch_norm''' ) if "head" in name: snake_case_ = name.replace('''head''', '''head.head''' ) if "encoder.norm" in name: snake_case_ = 
name.replace('''encoder.norm''', '''layernorm''' ) if "auxlayer" in name: snake_case_ = name.replace('''auxlayer''', '''auxiliary_head.head''' ) return name def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Dict: '''simple docstring''' for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) snake_case_ = state_dict.pop(F"dpt.encoder.layer.{i}.attn.qkv.weight" ) snake_case_ = state_dict.pop(F"dpt.encoder.layer.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict snake_case_ = in_proj_weight[: config.hidden_size, :] snake_case_ = in_proj_bias[: config.hidden_size] snake_case_ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] snake_case_ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] snake_case_ = in_proj_weight[ -config.hidden_size :, : ] snake_case_ = in_proj_bias[-config.hidden_size :] def __magic_name__ ( ) -> Any: '''simple docstring''' snake_case_ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' snake_case_ = Image.open(requests.get(__UpperCAmelCase, stream=__UpperCAmelCase ).raw ) return im @torch.no_grad() def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' snake_case_ ,snake_case_ = get_dpt_config(__UpperCAmelCase ) # load original state_dict from URL snake_case_ = torch.hub.load_state_dict_from_url(__UpperCAmelCase, map_location='''cpu''' ) # remove certain keys remove_ignore_keys_(__UpperCAmelCase ) # rename keys for key in state_dict.copy().keys(): snake_case_ = state_dict.pop(__UpperCAmelCase ) snake_case_ = val # read in qkv matrices read_in_q_k_v(__UpperCAmelCase, __UpperCAmelCase ) # load HuggingFace model snake_case_ = DPTForSemanticSegmentation(__UpperCAmelCase ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(__UpperCAmelCase ) model.load_state_dict(__UpperCAmelCase ) model.eval() # 
Check outputs on an image snake_case_ = 480 if '''ade''' in checkpoint_url else 384 snake_case_ = DPTImageProcessor(size=__UpperCAmelCase ) snake_case_ = prepare_img() snake_case_ = image_processor(__UpperCAmelCase, return_tensors='''pt''' ) # forward pass snake_case_ = model(**__UpperCAmelCase ).logits if '''ade''' in checkpoint_url else model(**__UpperCAmelCase ).predicted_depth # Assert logits snake_case_ = torch.tensor([[6.3_1_9_9, 6.3_6_2_9, 6.4_1_4_8], [6.3_8_5_0, 6.3_6_1_5, 6.4_1_6_6], [6.3_5_1_9, 6.3_1_7_6, 6.3_5_7_5]] ) if "ade" in checkpoint_url: snake_case_ = torch.tensor([[4.0_4_8_0, 4.2_4_2_0, 4.4_3_6_0], [4.3_1_2_4, 4.5_6_9_3, 4.8_2_6_1], [4.5_7_6_8, 4.8_9_6_5, 5.2_1_6_3]] ) assert outputs.shape == torch.Size(__UpperCAmelCase ) assert ( torch.allclose(outputs[0, 0, :3, :3], __UpperCAmelCase, atol=1e-4 ) if "ade" in checkpoint_url else torch.allclose(outputs[0, :3, :3], __UpperCAmelCase ) ) Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase ) print(F"Saving model to {pytorch_dump_folder_path}" ) model.save_pretrained(__UpperCAmelCase ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(__UpperCAmelCase ) if push_to_hub: print('''Pushing model to hub...''' ) model.push_to_hub( repo_path_or_name=Path(__UpperCAmelCase, __UpperCAmelCase ), organization='''nielsr''', commit_message='''Add model''', use_temp_dir=__UpperCAmelCase, ) image_processor.push_to_hub( repo_path_or_name=Path(__UpperCAmelCase, __UpperCAmelCase ), organization='''nielsr''', commit_message='''Add image processor''', use_temp_dir=__UpperCAmelCase, ) if __name__ == "__main__": a : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt', type=str, help='URL of the original DPT checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, 
help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', action='store_true', ) parser.add_argument( '--model_name', default='dpt-large', type=str, help='Name of the model, in case you\'re pushing to the hub.', ) a : List[Any] = parser.parse_args() convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
56
0
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> list:
    """Sort a list in ascending order using pancake sort.

    Pancake sort only uses prefix reversals ("flips"): the maximum of the
    unsorted prefix is flipped to the front, then the whole prefix is
    flipped so that maximum lands in its final position.

    Args:
        lowerCamelCase__: list of mutually comparable items.

    Returns:
        The sorted list.
    """
    # Bug fix: the original assigned every intermediate value to one
    # placeholder name while the body read ``cur``/``mi``/``arr``, which
    # were never defined; the names are restored here.
    arr = lowerCamelCase__
    cur = len(arr )
    while cur > 1:
        # Find the index of the maximum number in the unsorted prefix.
        mi = arr.index(max(arr[0:cur] ) )
        # Reverse from 0 to mi so the maximum is at the front.
        arr = arr[mi::-1] + arr[mi + 1 : len(arr )]
        # Reverse the whole unsorted prefix so the maximum reaches index cur - 1.
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr )]
        cur -= 1
    return arr


if __name__ == "__main__":
    # Bug fix: the original read undefined ``user_input``/``unsorted`` and
    # called undefined ``pancake_sort``.
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(SCREAMING_SNAKE_CASE__(unsorted))
73
'''simple docstring''' import re def __magic_name__ ( __UpperCAmelCase ) -> bool: '''simple docstring''' snake_case_ = re.compile( r'''^(?:0|94|\+94|0{2}94)''' r'''7(0|1|2|4|5|6|7|8)''' r'''(-| |)''' r'''\d{7}$''' ) return bool(re.search(__UpperCAmelCase, __UpperCAmelCase ) ) if __name__ == "__main__": a : Any = '0094702343221' print(is_sri_lankan_phone_number(phone))
56
0
"""simple docstring"""
import argparse
import json
import subprocess


def _snake_case ( target_runners , token ):
    """Check the status of the given self-hosted GitHub Actions runners.

    Queries the GitHub API (via ``curl``) for the repository's runners,
    writes any target runners that are offline to ``offline_runners.txt``
    (so they can be reported on Slack), and raises if any are offline.

    Args:
        target_runners: iterable of runner names to check.
        token: GitHub token with ``actions:read`` permission.

    Raises:
        ValueError: if at least one target runner reports status ``offline``.

    Bug fix: the original def used the same placeholder name for both
    parameters (a SyntaxError) and the body read undefined names
    (``cmd``, ``output``, ``status``, ``offline_runners``, ``failed``).
    """
    offline_runners = []
    # NOTE(review): token is interpolated into a shell command; it comes from
    # a trusted CLI argument here, not untrusted input.
    cmd = (
        F'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        ' https://api.github.com/repos/huggingface/transformers/actions/runners'
    )
    output = subprocess.run(cmd , shell=True , stdout=subprocess.PIPE )
    status = json.loads(output.stdout.decode('utf-8' ) )
    for runner in status['runners']:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner )

    # save the result so we can report them on Slack
    with open('offline_runners.txt' , 'w' ) as fp:
        fp.write(json.dumps(offline_runners ) )

    if len(offline_runners ) > 0:
        failed = '\n'.join([x['name'] for x in offline_runners] )
        raise ValueError(F'The following runners are offline:\n{failed}' )


if __name__ == "__main__":

    def list_str(values ):
        # argparse ``type`` helper: comma-separated string -> list of names.
        # Bug fix: the original named this ``_snake_case`` too, shadowing the
        # status function, while the parser referenced undefined ``list_str``.
        return values.split(',' )

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--target_runners',
        default=None,
        type=list_str,
        required=True,
        help='Comma-separated list of runners to check status.',
    )
    parser.add_argument(
        '--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
    )
    args = parser.parse_args()
    _snake_case(args.target_runners, args.token)
74
'''simple docstring'''
import re

from filelock import FileLock


try:
    import nltk

    # Bug fix: the availability flag was assigned to a throwaway name while
    # the rest of the module read ``NLTK_AVAILABLE``, which was never defined.
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    # Download the punkt sentence tokenizer once; the file lock keeps
    # concurrent processes from racing on the download.
    with FileLock('.lock') as lock:
        nltk.download('punkt', quiet=True)


def __magic_name__ ( __UpperCAmelCase ) -> str:
    '''Return the text with one sentence per line (for ROUGE-Lsum scoring).'''
    # Bug fix: ``re.sub`` returns a new string; the original discarded the
    # result, so the Pegasus newline token was never actually removed.
    __UpperCAmelCase = re.sub('''<n>''', '''''', __UpperCAmelCase )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(__UpperCAmelCase ) )
56
0
'''simple docstring''' import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin a_ : str = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_sentencepiece_available(): import sentencepiece as sp a_ : Optional[Any] = 5 a_ : str = 10 @require_sentencepiece @require_tokenizers class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ): lowercase : int =SpeechaTextTokenizer lowercase : int =False lowercase : List[str] =True def lowercase__ ( self ): """simple docstring""" super().setUp() lowerCamelCase_ =sp.SentencePieceProcessor() spm_model.Load(lowerCAmelCase ) lowerCamelCase_ =['''<s>''', '''<pad>''', '''</s>''', '''<unk>'''] vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(lowerCAmelCase ) )] lowerCamelCase_ =dict(zip(lowerCAmelCase, range(len(lowerCAmelCase ) ) ) ) lowerCamelCase_ =Path(self.tmpdirname ) save_json(lowerCAmelCase, save_dir / VOCAB_FILES_NAMES['''vocab_file'''] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(lowerCAmelCase, save_dir / VOCAB_FILES_NAMES['''spm_file'''] ) lowerCamelCase_ =SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''<pad>''' lowerCamelCase_ =1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase ), lowerCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase ), lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =list(self.get_tokenizer().get_vocab().keys() ) 
self.assertEqual(vocab_keys[0], '''<s>''' ) self.assertEqual(vocab_keys[1], '''<pad>''' ) self.assertEqual(vocab_keys[-1], '''j''' ) self.assertEqual(len(lowerCAmelCase ), 1_001 ) def lowercase__ ( self ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size, 1_001 ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) lowerCamelCase_ =tokenizer.tokenize('''This is a test''' ) self.assertListEqual(lowerCAmelCase, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase ), [289, 50, 14, 174, 386], ) lowerCamelCase_ =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowerCAmelCase, [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''], ) lowerCamelCase_ =tokenizer.convert_tokens_to_ids(lowerCAmelCase ) self.assertListEqual(lowerCAmelCase, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] ) lowerCamelCase_ =tokenizer.convert_ids_to_tokens(lowerCAmelCase ) self.assertListEqual( lowerCAmelCase, [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''], ) @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ={'''input_ids''': [[3_791, 797, 31, 11, 64, 797, 31, 2_429, 433, 12, 1_176, 12, 20, 786, 915, 
142, 2_413, 240, 37, 3_238, 797, 31, 11, 35, 93, 915, 142, 2_413, 240, 37, 5_540, 567, 1_276, 93, 37, 610, 40, 62, 455, 657, 1_042, 123, 780, 177, 37, 309, 241, 1_298, 514, 20, 292, 2_737, 114, 2_469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3_388, 511, 459, 4, 3_555, 40, 321, 302, 705, 4, 3_388, 511, 583, 326, 5, 5, 5, 62, 3_310, 560, 177, 2_680, 217, 1_508, 32, 31, 853, 418, 64, 583, 511, 1_605, 62, 35, 93, 560, 177, 2_680, 217, 1_508, 1_521, 64, 583, 511, 519, 62, 20, 1_515, 764, 20, 149, 261, 5_625, 7_972, 20, 5_540, 567, 1_276, 93, 3_925, 1_675, 11, 15, 802, 7_972, 576, 217, 1_508, 11, 35, 93, 1_253, 2_441, 15, 289, 652, 31, 416, 321, 3_842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2_681, 1_153, 3_434, 20, 5_540, 37, 567, 126, 1_253, 2_441, 3_376, 449, 210, 431, 1_563, 177, 767, 5_540, 11, 1_203, 472, 11, 2_953, 685, 285, 364, 706, 1_153, 20, 6_799, 20, 2_869, 20, 4_464, 126, 40, 2_429, 20, 1_040, 866, 2_664, 418, 20, 318, 20, 1_726, 186, 20, 265, 522, 35, 93, 2_191, 4_634, 20, 1_040, 12, 6_799, 15, 228, 2_356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_575, 2_666, 684, 1_582, 1_176, 12, 627, 149, 619, 20, 4_902, 563, 11, 20, 149, 261, 3_420, 2_356, 174, 142, 4_714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase, model_name='''facebook/s2t-small-mustc-en-de-st''', revision='''a14f04cf0776c02f62a8cb800cf7909e15ea23ad''', ) @require_sentencepiece class __UpperCamelCase ( 
unittest.TestCase ): lowercase : Tuple ='valhalla/s2t_mustc_multilinguial_medium' lowercase : Dict ='C\'est trop cool' lowercase : str ='Esto es genial' @classmethod def lowercase__ ( cls ): """simple docstring""" lowerCamelCase_ =SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name ) return cls def lowercase__ ( self ): """simple docstring""" self.assertEqual(self.tokenizer.lang_code_to_id['''pt'''], 4 ) self.assertEqual(self.tokenizer.lang_code_to_id['''ru'''], 6 ) self.assertEqual(self.tokenizer.lang_code_to_id['''it'''], 9 ) self.assertEqual(self.tokenizer.lang_code_to_id['''de'''], 11 ) def lowercase__ ( self ): """simple docstring""" self.assertEqual(self.tokenizer.vocab_size, 10_000 ) def lowercase__ ( self ): """simple docstring""" self.assertIn(lowerCAmelCase, self.tokenizer.all_special_ids ) lowerCamelCase_ =[ES_CODE, 4, 1_601, 47, 7_647, 2] lowerCamelCase_ =self.tokenizer.decode(lowerCAmelCase, skip_special_tokens=lowerCAmelCase ) lowerCamelCase_ =self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowerCAmelCase ) self.assertEqual(lowerCAmelCase, lowerCAmelCase ) self.assertNotIn(self.tokenizer.eos_token, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''fr''' lowerCamelCase_ =self.tokenizer(self.french_text ).input_ids self.assertEqual(encoded[0], lowerCAmelCase ) self.assertEqual(encoded[-1], self.tokenizer.eos_token_id ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''fr''' self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE] ) lowerCamelCase_ ='''es''' self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE] )
75
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


# Bug fix: the original assigned each optional-dependency update to a
# throwaway name, so ``_import_structure`` was never populated, and the
# constructed ``_LazyModule`` was never installed into ``sys.modules``.
# Submodule keys restored per the standard transformers ``__init__`` pattern.
_import_structure = {
    'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_llama'] = ['LlamaTokenizer']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_llama_fast'] = ['LlamaTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_llama'] = [
        'LlamaForCausalLM',
        'LlamaModel',
        'LlamaPreTrainedModel',
        'LlamaForSequenceClassification',
    ]

if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules are only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
56
0
import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self : Union[str, Any] , a : Optional[int] , a : str=13 , a : str=7 , a : List[Any]=True , a : List[str]=True , a : int=True , a : Any=True , a : Tuple=99 , a : int=32 , a : Union[str, Any]=5 , a : str=4 , a : Optional[Any]=37 , a : Optional[Any]="gelu" , a : Any=0.1 , a : Optional[Any]=0.1 , a : Any=512 , a : int=16 , a : Optional[int]=2 , a : Optional[int]=0.02 , a : str=4 , ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE : str = parent SCREAMING_SNAKE_CASE : List[str] = batch_size SCREAMING_SNAKE_CASE : Optional[Any] = seq_length SCREAMING_SNAKE_CASE : List[Any] = is_training SCREAMING_SNAKE_CASE : Optional[Any] = use_attention_mask SCREAMING_SNAKE_CASE : Any = use_token_type_ids SCREAMING_SNAKE_CASE : Optional[Any] = use_labels SCREAMING_SNAKE_CASE : List[Any] = vocab_size SCREAMING_SNAKE_CASE : int = hidden_size SCREAMING_SNAKE_CASE : int = num_hidden_layers SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads SCREAMING_SNAKE_CASE : List[str] = intermediate_size SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings SCREAMING_SNAKE_CASE : int = type_vocab_size SCREAMING_SNAKE_CASE : List[Any] = type_sequence_label_size SCREAMING_SNAKE_CASE : 
List[str] = initializer_range SCREAMING_SNAKE_CASE : str = num_choices def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE : Union[str, Any] = None if self.use_attention_mask: SCREAMING_SNAKE_CASE : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE : Optional[int] = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) SCREAMING_SNAKE_CASE : Optional[Any] = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def __UpperCamelCase ( self : Optional[int] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[Any] = config_and_inputs SCREAMING_SNAKE_CASE : Dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict @require_flax class _UpperCamelCase ( __A , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =True lowerCamelCase__ =( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def __UpperCamelCase ( 
self : Union[str, Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = FlaxRoFormerModelTester(self ) @slow def __UpperCamelCase ( self : int ) -> Optional[Any]: """simple docstring""" for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE : int = model_class_name.from_pretrained("junnyu/roformer_chinese_small" , from_pt=a ) SCREAMING_SNAKE_CASE : Any = model(np.ones((1, 1) ) ) self.assertIsNotNone(a ) @require_flax class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' @slow def __UpperCamelCase ( self : Tuple ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" ) SCREAMING_SNAKE_CASE : Tuple = jnp.array([[0, 1, 2, 3, 4, 5]] ) SCREAMING_SNAKE_CASE : Tuple = model(a )[0] SCREAMING_SNAKE_CASE : Optional[int] = 5_0000 SCREAMING_SNAKE_CASE : Any = (1, 6, vocab_size) self.assertEqual(output.shape , a ) SCREAMING_SNAKE_CASE : Any = jnp.array( [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , a , atol=1e-4 ) )
76
'''simple docstring''' import re from typing import Callable, List, Optional, Union import tensorflow as tf try: from tensorflow.keras.optimizers.legacy import Adam except ImportError: from tensorflow.keras.optimizers import Adam class a ( tf.keras.optimizers.schedules.LearningRateSchedule ): def __init__( self : Optional[Any] , lowercase_ : float , lowercase_ : Callable , lowercase_ : int , lowercase_ : float = 1.0 , lowercase_ : str = None , ): super().__init__() snake_case_ = initial_learning_rate snake_case_ = warmup_steps snake_case_ = power snake_case_ = decay_schedule_fn snake_case_ = name def __call__( self : Tuple , lowercase_ : str ): with tf.name_scope(self.name or '''WarmUp''' ) as name: # Implements polynomial warmup. i.e., if global_step < warmup_steps, the # learning rate will be `global_step/num_warmup_steps * init_lr`. snake_case_ = tf.cast(lowercase_ , tf.floataa ) snake_case_ = tf.cast(self.warmup_steps , tf.floataa ) snake_case_ = global_step_float / warmup_steps_float snake_case_ = self.initial_learning_rate * tf.math.pow(lowercase_ , self.power ) return tf.cond( global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=lowercase_ , ) def A_ ( self : Any ): return { "initial_learning_rate": self.initial_learning_rate, "decay_schedule_fn": self.decay_schedule_fn, "warmup_steps": self.warmup_steps, "power": self.power, "name": self.name, } def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase = 0.0, __UpperCAmelCase = 0.9, __UpperCAmelCase = 0.9_9_9, __UpperCAmelCase = 1e-8, __UpperCAmelCase = None, __UpperCAmelCase = None, __UpperCAmelCase = 0.0, __UpperCAmelCase = 1.0, __UpperCAmelCase = None, ) -> List[str]: '''simple docstring''' snake_case_ = tf.keras.optimizers.schedules.PolynomialDecay( initial_learning_rate=__UpperCAmelCase, decay_steps=num_train_steps - num_warmup_steps, end_learning_rate=init_lr * min_lr_ratio, 
power=__UpperCAmelCase, ) if num_warmup_steps: snake_case_ = WarmUp( initial_learning_rate=__UpperCAmelCase, decay_schedule_fn=__UpperCAmelCase, warmup_steps=__UpperCAmelCase, ) if weight_decay_rate > 0.0: snake_case_ = AdamWeightDecay( learning_rate=__UpperCAmelCase, weight_decay_rate=__UpperCAmelCase, beta_a=__UpperCAmelCase, beta_a=__UpperCAmelCase, epsilon=__UpperCAmelCase, clipnorm=__UpperCAmelCase, global_clipnorm=__UpperCAmelCase, exclude_from_weight_decay=['''LayerNorm''', '''layer_norm''', '''bias'''], include_in_weight_decay=__UpperCAmelCase, ) else: snake_case_ = tf.keras.optimizers.Adam( learning_rate=__UpperCAmelCase, beta_a=__UpperCAmelCase, beta_a=__UpperCAmelCase, epsilon=__UpperCAmelCase, clipnorm=__UpperCAmelCase, global_clipnorm=__UpperCAmelCase, ) # We return the optimizer and the LR scheduler in order to better track the # evolution of the LR independently of the optimizer. return optimizer, lr_schedule class a ( _lowerCamelCase ): def __init__( self : Dict , lowercase_ : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , lowercase_ : float = 0.9 , lowercase_ : float = 0.999 , lowercase_ : float = 1e-7 , lowercase_ : bool = False , lowercase_ : float = 0.0 , lowercase_ : Optional[List[str]] = None , lowercase_ : Optional[List[str]] = None , lowercase_ : str = "AdamWeightDecay" , **lowercase_ : Optional[int] , ): super().__init__(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ) snake_case_ = weight_decay_rate snake_case_ = include_in_weight_decay snake_case_ = exclude_from_weight_decay @classmethod def A_ ( cls : Dict , lowercase_ : Union[str, Any] ): snake_case_ = {'''WarmUp''': WarmUp} return super(lowercase_ , cls ).from_config(lowercase_ , custom_objects=lowercase_ ) def A_ ( self : str , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : Optional[int] ): super(lowercase_ , self )._prepare_local(lowercase_ , lowercase_ , lowercase_ ) snake_case_ = 
tf.constant( self.weight_decay_rate , name='''adam_weight_decay_rate''' ) def A_ ( self : str , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Any ): snake_case_ = self._do_use_weight_decay(var.name ) if do_decay: return var.assign_sub( learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['''weight_decay_rate'''] , use_locking=self._use_locking , ) return tf.no_op() def A_ ( self : Union[str, Any] , lowercase_ : int , lowercase_ : str=None , **lowercase_ : List[str] ): snake_case_ ,snake_case_ = list(zip(*lowercase_ ) ) return super(lowercase_ , self ).apply_gradients(zip(lowercase_ , lowercase_ ) , name=lowercase_ , **lowercase_ ) def A_ ( self : List[Any] , lowercase_ : str , lowercase_ : str , lowercase_ : Any ): if apply_state is None: return self._decayed_lr_t[var_dtype], {} snake_case_ = apply_state or {} snake_case_ = apply_state.get((var_device, var_dtype) ) if coefficients is None: snake_case_ = self._fallback_apply_state(lowercase_ , lowercase_ ) snake_case_ = coefficients return coefficients["lr_t"], {"apply_state": apply_state} def A_ ( self : Union[str, Any] , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : Optional[int]=None ): snake_case_ ,snake_case_ = self._get_lr(var.device , var.dtype.base_dtype , lowercase_ ) snake_case_ = self._decay_weights_op(lowercase_ , lowercase_ , lowercase_ ) with tf.control_dependencies([decay] ): return super(lowercase_ , self )._resource_apply_dense(lowercase_ , lowercase_ , **lowercase_ ) def A_ ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : int , lowercase_ : str , lowercase_ : List[Any]=None ): snake_case_ ,snake_case_ = self._get_lr(var.device , var.dtype.base_dtype , lowercase_ ) snake_case_ = self._decay_weights_op(lowercase_ , lowercase_ , lowercase_ ) with tf.control_dependencies([decay] ): return super(lowercase_ , self )._resource_apply_sparse(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ) def A_ ( self : Union[str, Any] ): 
snake_case_ = super().get_config() config.update({'''weight_decay_rate''': self.weight_decay_rate} ) return config def A_ ( self : Optional[int] , lowercase_ : int ): if self.weight_decay_rate == 0: return False if self._include_in_weight_decay: for r in self._include_in_weight_decay: if re.search(lowercase_ , lowercase_ ) is not None: return True if self._exclude_from_weight_decay: for r in self._exclude_from_weight_decay: if re.search(lowercase_ , lowercase_ ) is not None: return False return True class a ( _lowerCamelCase ): def __init__( self : List[Any] ): snake_case_ = [] snake_case_ = None @property def A_ ( self : Union[str, Any] ): if self._accum_steps is None: snake_case_ = tf.Variable( tf.constant(0 , dtype=tf.intaa ) , trainable=lowercase_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) return self._accum_steps.value() @property def A_ ( self : Dict ): if not self._gradients: raise ValueError('''The accumulator should be called first to initialize the gradients''' ) return [gradient.value() if gradient is not None else gradient for gradient in self._gradients] def __call__( self : Any , lowercase_ : int ): if not self._gradients: snake_case_ = self.step # Create the step variable. 
self._gradients.extend( [ tf.Variable( tf.zeros_like(lowercase_ ) , trainable=lowercase_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) if gradient is not None else gradient for gradient in gradients ] ) if len(lowercase_ ) != len(self._gradients ): raise ValueError(F"Expected {len(self._gradients )} gradients, but got {len(lowercase_ )}" ) for accum_gradient, gradient in zip(self._gradients , lowercase_ ): if accum_gradient is not None and gradient is not None: accum_gradient.assign_add(lowercase_ ) self._accum_steps.assign_add(1 ) def A_ ( self : Optional[int] ): if not self._gradients: return self._accum_steps.assign(0 ) for gradient in self._gradients: if gradient is not None: gradient.assign(tf.zeros_like(lowercase_ ) )
56
0
"""simple docstring""" from __future__ import annotations from random import random from typing import Generic, TypeVar _UpperCamelCase : Optional[int] = TypeVar("KT") _UpperCamelCase : Tuple = TypeVar("VT") class UpperCAmelCase_ ( Generic[KT, VT]): def __init__( self , a = "root" , a = None ) -> Dict: lowercase__ : str = key lowercase__ : int = value lowercase__ : list[Node[KT, VT]] = [] def __repr__( self ) -> str: return f"""Node({self.key}: {self.value})""" @property def _UpperCAmelCase ( self ) -> int: return len(self.forward ) class UpperCAmelCase_ ( Generic[KT, VT]): def __init__( self , a = 0.5 , a = 1_6 ) -> Any: lowercase__ : Node[KT, VT] = Node[KT, VT]() lowercase__ : Any = 0 lowercase__ : int = p lowercase__ : Optional[Any] = max_level def __str__( self ) -> str: lowercase__ : Any = list(self ) if len(a ) == 0: return f"""SkipList(level={self.level})""" lowercase__ : Optional[int] = max((len(str(a ) ) for item in items) , default=4 ) lowercase__ : Dict = max(a , 4 ) + 4 lowercase__ : Dict = self.head lowercase__ : Tuple = [] lowercase__ : Any = node.forward.copy() lines.append(f"""[{node.key}]""".ljust(a , '-' ) + '* ' * len(a ) ) lines.append(' ' * label_size + '| ' * len(a ) ) while len(node.forward ) != 0: lowercase__ : Union[str, Any] = node.forward[0] lines.append( f"""[{node.key}]""".ljust(a , '-' ) + ' '.join(str(n.key ) if n.key == node.key else '|' for n in forwards ) ) lines.append(' ' * label_size + '| ' * len(a ) ) lowercase__ : str = node.forward lines.append('None'.ljust(a ) + '* ' * len(a ) ) return f"""SkipList(level={self.level})\n""" + "\n".join(a ) def __iter__( self ) -> Any: lowercase__ : Dict = self.head while len(node.forward ) != 0: yield node.forward[0].key lowercase__ : List[str] = node.forward[0] def _UpperCAmelCase ( self ) -> int: lowercase__ : List[Any] = 1 while random() < self.p and level < self.max_level: level += 1 return level def _UpperCAmelCase ( self , a ) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]: 
lowercase__ : Optional[int] = [] lowercase__ : Tuple = self.head for i in reversed(range(self.level ) ): # i < node.level - When node level is lesser than `i` decrement `i`. # node.forward[i].key < key - Jumping to node with key value higher # or equal to searched key would result # in skipping searched key. while i < node.level and node.forward[i].key < key: lowercase__ : Dict = node.forward[i] # Each leftmost node (relative to searched node) will potentially have to # be updated. update_vector.append(a ) update_vector.reverse() # Note that we were inserting values in reverse order. # len(node.forward) != 0 - If current node doesn't contain any further # references then searched key is not present. # node.forward[0].key == key - Next node key should be equal to search key # if key is present. if len(node.forward ) != 0 and node.forward[0].key == key: return node.forward[0], update_vector else: return None, update_vector def _UpperCAmelCase ( self , a ) -> Dict: lowercase__ , lowercase__ : Optional[Any] = self._locate_node(a ) if node is not None: for i, update_node in enumerate(a ): # Remove or replace all references to removed node. if update_node.level > i and update_node.forward[i].key == key: if node.level > i: lowercase__ : List[str] = node.forward[i] else: lowercase__ : Optional[int] = update_node.forward[:i] def _UpperCAmelCase ( self , a , a ) -> Optional[int]: lowercase__ , lowercase__ : str = self._locate_node(a ) if node is not None: lowercase__ : Optional[int] = value else: lowercase__ : List[str] = self.random_level() if level > self.level: # After level increase we have to add additional nodes to head. for _ in range(self.level - 1 , a ): update_vector.append(self.head ) lowercase__ : List[Any] = level lowercase__ : Tuple = Node(a , a ) for i, update_node in enumerate(update_vector[:level] ): # Change references to pass through new node. 
if update_node.level > i: new_node.forward.append(update_node.forward[i] ) if update_node.level < i + 1: update_node.forward.append(a ) else: lowercase__ : str = new_node def _UpperCAmelCase ( self , a ) -> VT | None: lowercase__ , lowercase__ : Optional[Any] = self._locate_node(a ) if node is not None: return node.value return None def a_ ( ): '''simple docstring''' lowercase__ : Optional[Any] = SkipList() skip_list.insert('Key1' , 3 ) skip_list.insert('Key2' , 12 ) skip_list.insert('Key3' , 41 ) skip_list.insert('Key4' , -19 ) lowercase__ : Dict = skip_list.head lowercase__ : Dict = {} while node.level != 0: lowercase__ : Dict = node.forward[0] lowercase__ : int = node.value assert len(_lowerCAmelCase ) == 4 assert all_values["Key1"] == 3 assert all_values["Key2"] == 12 assert all_values["Key3"] == 41 assert all_values["Key4"] == -19 def a_ ( ): '''simple docstring''' lowercase__ : Optional[Any] = SkipList() skip_list.insert('Key1' , 10 ) skip_list.insert('Key1' , 12 ) skip_list.insert('Key5' , 7 ) skip_list.insert('Key7' , 10 ) skip_list.insert('Key10' , 5 ) skip_list.insert('Key7' , 7 ) skip_list.insert('Key5' , 5 ) skip_list.insert('Key10' , 10 ) lowercase__ : Dict = skip_list.head lowercase__ : Tuple = {} while node.level != 0: lowercase__ : Any = node.forward[0] lowercase__ : Optional[int] = node.value if len(_lowerCAmelCase ) != 4: print() assert len(_lowerCAmelCase ) == 4 assert all_values["Key1"] == 12 assert all_values["Key7"] == 7 assert all_values["Key5"] == 5 assert all_values["Key10"] == 10 def a_ ( ): '''simple docstring''' lowercase__ : Union[str, Any] = SkipList() assert skip_list.find('Some key' ) is None def a_ ( ): '''simple docstring''' lowercase__ : List[str] = SkipList() skip_list.insert('Key2' , 20 ) assert skip_list.find('Key2' ) == 20 skip_list.insert('Some Key' , 10 ) skip_list.insert('Key2' , 8 ) skip_list.insert('V' , 13 ) assert skip_list.find('Y' ) is None assert skip_list.find('Key2' ) == 8 assert skip_list.find('Some Key' ) == 10 
assert skip_list.find('V' ) == 13 def a_ ( ): '''simple docstring''' lowercase__ : str = SkipList() skip_list.delete('Some key' ) assert len(skip_list.head.forward ) == 0 def a_ ( ): '''simple docstring''' lowercase__ : Optional[Any] = SkipList() skip_list.insert('Key1' , 12 ) skip_list.insert('V' , 13 ) skip_list.insert('X' , 14 ) skip_list.insert('Key2' , 15 ) skip_list.delete('V' ) skip_list.delete('Key2' ) assert skip_list.find('V' ) is None assert skip_list.find('Key2' ) is None def a_ ( ): '''simple docstring''' lowercase__ : List[str] = SkipList() skip_list.insert('Key1' , 12 ) skip_list.insert('V' , 13 ) skip_list.insert('X' , 14 ) skip_list.insert('Key2' , 15 ) skip_list.delete('V' ) assert skip_list.find('V' ) is None assert skip_list.find('X' ) == 14 assert skip_list.find('Key1' ) == 12 assert skip_list.find('Key2' ) == 15 skip_list.delete('X' ) assert skip_list.find('V' ) is None assert skip_list.find('X' ) is None assert skip_list.find('Key1' ) == 12 assert skip_list.find('Key2' ) == 15 skip_list.delete('Key1' ) assert skip_list.find('V' ) is None assert skip_list.find('X' ) is None assert skip_list.find('Key1' ) is None assert skip_list.find('Key2' ) == 15 skip_list.delete('Key2' ) assert skip_list.find('V' ) is None assert skip_list.find('X' ) is None assert skip_list.find('Key1' ) is None assert skip_list.find('Key2' ) is None def a_ ( ): '''simple docstring''' lowercase__ : Union[str, Any] = SkipList() skip_list.insert('Key1' , 12 ) skip_list.insert('V' , 13 ) skip_list.insert('X' , 142 ) skip_list.insert('Key2' , 15 ) skip_list.delete('X' ) def traverse_keys(_lowerCAmelCase : Tuple ): yield node.key for forward_node in node.forward: yield from traverse_keys(_lowerCAmelCase ) assert len(set(traverse_keys(skip_list.head ) ) ) == 4 def a_ ( ): '''simple docstring''' def is_sorted(_lowerCAmelCase : Dict ): return all(next_item >= item for item, next_item in zip(_lowerCAmelCase , lst[1:] ) ) lowercase__ : int = SkipList() for i in range(10 ): 
skip_list.insert(_lowerCAmelCase , _lowerCAmelCase ) assert is_sorted(list(_lowerCAmelCase ) ) skip_list.delete(5 ) skip_list.delete(8 ) skip_list.delete(2 ) assert is_sorted(list(_lowerCAmelCase ) ) skip_list.insert(-12 , -12 ) skip_list.insert(77 , 77 ) assert is_sorted(list(_lowerCAmelCase ) ) def a_ ( ): '''simple docstring''' for _ in range(100 ): # Repeat test 100 times due to the probabilistic nature of skip list # random values == random bugs test_insert() test_insert_overrides_existing_value() test_searching_empty_list_returns_none() test_search() test_deleting_item_from_empty_list_do_nothing() test_deleted_items_are_not_founded_by_find_method() test_delete_removes_only_given_key() test_delete_doesnt_leave_dead_nodes() test_iter_always_yields_sorted_values() def a_ ( ): '''simple docstring''' lowercase__ : Union[str, Any] = SkipList() skip_list.insert(2 , '2' ) skip_list.insert(4 , '4' ) skip_list.insert(6 , '4' ) skip_list.insert(4 , '5' ) skip_list.insert(8 , '4' ) skip_list.insert(9 , '4' ) skip_list.delete(4 ) print(_lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() main()
77
'''simple docstring''' import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class a ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): snake_case_ = AutoencoderKL snake_case_ = "sample" snake_case_ = 1e-2 @property def A_ ( self : Dict ): snake_case_ = 4 snake_case_ = 3 snake_case_ = (32, 32) snake_case_ = floats_tensor((batch_size, num_channels) + sizes ).to(lowercase_ ) return {"sample": image} @property def A_ ( self : List[Any] ): return (3, 32, 32) @property def A_ ( self : Dict ): return (3, 32, 32) def A_ ( self : Union[str, Any] ): snake_case_ = { '''block_out_channels''': [32, 64], '''in_channels''': 3, '''out_channels''': 3, '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], '''latent_channels''': 4, } snake_case_ = self.dummy_input return init_dict, inputs_dict def A_ ( self : Any ): pass def A_ ( self : str ): pass @unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' ) def A_ ( self : Dict ): # enable deterministic behavior for gradient checkpointing snake_case_ ,snake_case_ = self.prepare_init_args_and_inputs_for_common() snake_case_ = self.model_class(**lowercase_ ) model.to(lowercase_ ) assert not model.is_gradient_checkpointing and model.training snake_case_ = model(**lowercase_ ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() snake_case_ = torch.randn_like(lowercase_ ) snake_case_ = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing snake_case_ = self.model_class(**lowercase_ ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(lowercase_ ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training snake_case_ = model_a(**lowercase_ ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() snake_case_ = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1e-5 ) snake_case_ = dict(model.named_parameters() ) snake_case_ = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) ) def A_ ( self : Tuple ): snake_case_ ,snake_case_ = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' , output_loading_info=lowercase_ ) self.assertIsNotNone(lowercase_ ) self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 ) model.to(lowercase_ ) snake_case_ = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def A_ ( self : Tuple ): snake_case_ = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' ) snake_case_ = model.to(lowercase_ ) model.eval() if torch_device == "mps": snake_case_ = torch.manual_seed(0 ) else: snake_case_ = torch.Generator(device=lowercase_ ).manual_seed(0 ) snake_case_ = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) snake_case_ = image.to(lowercase_ ) with torch.no_grad(): snake_case_ = model(lowercase_ , 
sample_posterior=lowercase_ , generator=lowercase_ ).sample snake_case_ = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. if torch_device == "mps": snake_case_ = torch.tensor( [ -4.0_078e-01, -3.8_323e-04, -1.2_681e-01, -1.1_462e-01, 2.0_095e-01, 1.0_893e-01, -8.8_247e-02, -3.0_361e-01, -9.8_644e-03, ] ) elif torch_device == "cpu": snake_case_ = torch.tensor( [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] ) else: snake_case_ = torch.tensor( [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] ) self.assertTrue(torch_all_close(lowercase_ , lowercase_ , rtol=1e-2 ) ) @slow class a ( unittest.TestCase ): def A_ ( self : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int] ): return F"gaussian_noise_s={seed}_shape={'_'.join([str(lowercase_ ) for s in shape] )}.npy" def A_ ( self : Any ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def A_ ( self : Dict , lowercase_ : List[Any]=0 , lowercase_ : Union[str, Any]=(4, 3, 512, 512) , lowercase_ : Optional[Any]=False ): snake_case_ = torch.floataa if fpaa else torch.floataa snake_case_ = torch.from_numpy(load_hf_numpy(self.get_file_format(lowercase_ , lowercase_ ) ) ).to(lowercase_ ).to(lowercase_ ) return image def A_ ( self : Any , lowercase_ : Dict="CompVis/stable-diffusion-v1-4" , lowercase_ : List[str]=False ): snake_case_ = '''fp16''' if fpaa else None snake_case_ = torch.floataa if fpaa else torch.floataa snake_case_ = AutoencoderKL.from_pretrained( lowercase_ , subfolder='''vae''' , torch_dtype=lowercase_ , revision=lowercase_ , ) model.to(lowercase_ ).eval() return model def A_ ( self : Any , lowercase_ : int=0 ): if torch_device == "mps": return torch.manual_seed(lowercase_ ) return torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) 
@parameterized.expand( [ # fmt: off [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def A_ ( self : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : Tuple ): snake_case_ = self.get_sd_vae_model() snake_case_ = self.get_sd_image(lowercase_ ) snake_case_ = self.get_generator(lowercase_ ) with torch.no_grad(): snake_case_ = model(lowercase_ , generator=lowercase_ , sample_posterior=lowercase_ ).sample assert sample.shape == image.shape snake_case_ = sample[-1, -2:, -2:, :2].flatten().float().cpu() snake_case_ = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice ) assert torch_all_close(lowercase_ , lowercase_ , atol=3e-3 ) @parameterized.expand( [ # fmt: off [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]], [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]], # fmt: on ] ) @require_torch_gpu def A_ ( self : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Dict ): snake_case_ = self.get_sd_vae_model(fpaa=lowercase_ ) snake_case_ = self.get_sd_image(lowercase_ , fpaa=lowercase_ ) snake_case_ = self.get_generator(lowercase_ ) with torch.no_grad(): snake_case_ = model(lowercase_ , generator=lowercase_ , sample_posterior=lowercase_ ).sample assert sample.shape == image.shape snake_case_ = sample[-1, -2:, :2, -2:].flatten().float().cpu() snake_case_ = torch.tensor(lowercase_ ) assert torch_all_close(lowercase_ , lowercase_ , atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], 
[0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def A_ ( self : Tuple , lowercase_ : Dict , lowercase_ : str , lowercase_ : Optional[int] ): snake_case_ = self.get_sd_vae_model() snake_case_ = self.get_sd_image(lowercase_ ) with torch.no_grad(): snake_case_ = model(lowercase_ ).sample assert sample.shape == image.shape snake_case_ = sample[-1, -2:, -2:, :2].flatten().float().cpu() snake_case_ = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice ) assert torch_all_close(lowercase_ , lowercase_ , atol=3e-3 ) @parameterized.expand( [ # fmt: off [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]], [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]], # fmt: on ] ) @require_torch_gpu def A_ ( self : Dict , lowercase_ : Tuple , lowercase_ : Optional[int] ): snake_case_ = self.get_sd_vae_model() snake_case_ = self.get_sd_image(lowercase_ , shape=(3, 4, 64, 64) ) with torch.no_grad(): snake_case_ = model.decode(lowercase_ ).sample assert list(sample.shape ) == [3, 3, 512, 512] snake_case_ = sample[-1, -2:, :2, -2:].flatten().cpu() snake_case_ = torch.tensor(lowercase_ ) assert torch_all_close(lowercase_ , lowercase_ , atol=1e-3 ) @parameterized.expand( [ # fmt: off [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]], [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]], # fmt: on ] ) @require_torch_gpu def A_ ( self : Union[str, Any] , lowercase_ : Any , lowercase_ : Optional[Any] ): snake_case_ = self.get_sd_vae_model(fpaa=lowercase_ ) snake_case_ = self.get_sd_image(lowercase_ , shape=(3, 4, 64, 64) , fpaa=lowercase_ ) with torch.no_grad(): snake_case_ = model.decode(lowercase_ ).sample assert list(sample.shape ) == [3, 3, 512, 512] snake_case_ = sample[-1, -2:, :2, -2:].flatten().float().cpu() snake_case_ = torch.tensor(lowercase_ ) assert torch_all_close(lowercase_ , lowercase_ , atol=5e-3 ) 
@parameterized.expand([(13,), (16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' ) def A_ ( self : Optional[Any] , lowercase_ : List[str] ): snake_case_ = self.get_sd_vae_model(fpaa=lowercase_ ) snake_case_ = self.get_sd_image(lowercase_ , shape=(3, 4, 64, 64) , fpaa=lowercase_ ) with torch.no_grad(): snake_case_ = model.decode(lowercase_ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): snake_case_ = model.decode(lowercase_ ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(lowercase_ , lowercase_ , atol=1e-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' ) def A_ ( self : Optional[Any] , lowercase_ : Any ): snake_case_ = self.get_sd_vae_model() snake_case_ = self.get_sd_image(lowercase_ , shape=(3, 4, 64, 64) ) with torch.no_grad(): snake_case_ = model.decode(lowercase_ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): snake_case_ = model.decode(lowercase_ ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(lowercase_ , lowercase_ , atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]], [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]], # fmt: on ] ) def A_ ( self : str , lowercase_ : Optional[int] , lowercase_ : Tuple ): snake_case_ = self.get_sd_vae_model() snake_case_ = self.get_sd_image(lowercase_ ) snake_case_ = self.get_generator(lowercase_ ) with torch.no_grad(): snake_case_ = model.encode(lowercase_ ).latent_dist snake_case_ = dist.sample(generator=lowercase_ ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] snake_case_ = sample[0, -1, -3:, -3:].flatten().cpu() 
snake_case_ = torch.tensor(lowercase_ ) snake_case_ = 3e-3 if torch_device != '''mps''' else 1e-2 assert torch_all_close(lowercase_ , lowercase_ , atol=lowercase_ )
56
0
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) snake_case_ = logging.getLogger(__name__) @dataclass class A_ : """simple docstring""" __UpperCamelCase = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) __UpperCamelCase = field( default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) __UpperCamelCase = field( default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) __UpperCamelCase = field( default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) __UpperCamelCase = field(default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Whether tp freeze the encoder."""} ) __UpperCamelCase = field(default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Whether to freeze the embeddings."""} ) @dataclass class A_ : """simple docstring""" __UpperCamelCase = field( metadata={"""help""": """The input data dir. 
Should contain the .tsv files (or other data files) for the task."""} ) __UpperCamelCase = field( default="""summarization""" , metadata={"""help""": """Task name, summarization (or summarization_{dataset} for pegasus) or translation"""} , ) __UpperCamelCase = field( default=10_24 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) __UpperCamelCase = field( default=1_28 , metadata={ """help""": ( """The maximum total sequence length for target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) __UpperCamelCase = field( default=1_42 , metadata={ """help""": ( """The maximum total sequence length for validation target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded. """ """This argument is also used to override the ``max_length`` param of ``model.generate``, which is used """ """during ``evaluate`` and ``predict``.""" ) } , ) __UpperCamelCase = field( default=1_42 , metadata={ """help""": ( """The maximum total sequence length for test target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) __UpperCamelCase = field(default=-1 , metadata={"""help""": """# training examples. -1 means use all."""} ) __UpperCamelCase = field(default=-1 , metadata={"""help""": """# validation examples. -1 means use all."""} ) __UpperCamelCase = field(default=-1 , metadata={"""help""": """# test examples. 
-1 means use all."""} ) __UpperCamelCase = field(default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Source language id for translation."""} ) __UpperCamelCase = field(default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Target language id for translation."""} ) __UpperCamelCase = field(default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """# num_beams to use for evaluation."""} ) __UpperCamelCase = field( default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."""} , ) def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ): logger.info(F"""***** {split} metrics *****""" ) for key in sorted(metrics.keys() ): logger.info(F""" {key} = {metrics[key]}""" ) save_json(lowercase_ , os.path.join(lowercase_ , F"""{split}_results.json""" ) ) def _lowerCAmelCase ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. 
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses() check_output_dir(lowercase_ ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info('Training/evaluation parameters %s' , lowercase_ ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
UpperCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) UpperCAmelCase = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout') for p in extra_model_params: if getattr(lowercase_ , lowercase_ , lowercase_ ): assert hasattr(lowercase_ , lowercase_ ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute""" setattr(lowercase_ , lowercase_ , getattr(lowercase_ , lowercase_ ) ) UpperCAmelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf='.ckpt' in model_args.model_name_or_path , config=lowercase_ , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(lowercase_ , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: UpperCAmelCase = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(lowercase_ , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase = tokenizer.lang_code_to_id[data_args.tgt_lang] else: UpperCAmelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(lowercase_ ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) UpperCAmelCase = SeqaSeqDataset # Get datasets UpperCAmelCase = ( dataset_class( lowercase_ , type_path='train' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , ) if 
training_args.do_train else None ) UpperCAmelCase = ( dataset_class( lowercase_ , type_path='val' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) UpperCAmelCase = ( dataset_class( lowercase_ , type_path='test' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , ) if training_args.do_predict else None ) # Initialize our Trainer UpperCAmelCase = ( build_compute_metrics_fn(data_args.task , lowercase_ ) if training_args.predict_with_generate else None ) UpperCAmelCase = SeqaSeqTrainer( model=lowercase_ , args=lowercase_ , data_args=lowercase_ , train_dataset=lowercase_ , eval_dataset=lowercase_ , data_collator=SeqaSeqDataCollator( lowercase_ , lowercase_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=lowercase_ , tokenizer=lowercase_ , ) UpperCAmelCase = {} # Training if training_args.do_train: logger.info('*** Train ***' ) UpperCAmelCase = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) UpperCAmelCase = train_result.metrics UpperCAmelCase = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics('train' , lowercase_ , training_args.output_dir ) all_metrics.update(lowercase_ ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) 
tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info('*** Evaluate ***' ) UpperCAmelCase = trainer.evaluate(metric_key_prefix='val' ) UpperCAmelCase = data_args.n_val UpperCAmelCase = round(metrics['val_loss'] , 4 ) if trainer.is_world_process_zero(): handle_metrics('val' , lowercase_ , training_args.output_dir ) all_metrics.update(lowercase_ ) if training_args.do_predict: logger.info('*** Predict ***' ) UpperCAmelCase = trainer.predict(test_dataset=lowercase_ , metric_key_prefix='test' ) UpperCAmelCase = test_output.metrics UpperCAmelCase = data_args.n_test if trainer.is_world_process_zero(): UpperCAmelCase = round(metrics['test_loss'] , 4 ) handle_metrics('test' , lowercase_ , training_args.output_dir ) all_metrics.update(lowercase_ ) if training_args.predict_with_generate: UpperCAmelCase = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ ) UpperCAmelCase = lmap(str.strip , lowercase_ ) write_txt_file(lowercase_ , os.path.join(training_args.output_dir , 'test_generations.txt' ) ) if trainer.is_world_process_zero(): save_json(lowercase_ , os.path.join(training_args.output_dir , 'all_results.json' ) ) return all_metrics def _lowerCAmelCase ( lowercase_ ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
78
'''simple docstring''' from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxCrossAttnUpBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, FlaxUpBlockaD, ) @flax.struct.dataclass class a ( _lowerCamelCase ): snake_case_ = 42 @flax_register_to_config class a ( nn.Module , _lowerCamelCase , _lowerCamelCase ): snake_case_ = 32 snake_case_ = 4 snake_case_ = 4 snake_case_ = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) snake_case_ = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D") snake_case_ = False snake_case_ = (320, 640, 1_280, 1_280) snake_case_ = 2 snake_case_ = 8 snake_case_ = None snake_case_ = 1_280 snake_case_ = 0.0 snake_case_ = False snake_case_ = jnp.floataa snake_case_ = True snake_case_ = 0 snake_case_ = False def A_ ( self : Optional[int] , lowercase_ : jax.random.KeyArray ): # init input tensors snake_case_ = (1, self.in_channels, self.sample_size, self.sample_size) snake_case_ = jnp.zeros(lowercase_ , dtype=jnp.floataa ) snake_case_ = jnp.ones((1,) , dtype=jnp.intaa ) snake_case_ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) snake_case_ ,snake_case_ = jax.random.split(lowercase_ ) snake_case_ = {'''params''': params_rng, '''dropout''': dropout_rng} return self.init(lowercase_ , lowercase_ , lowercase_ , lowercase_ )["params"] def A_ ( self : List[str] ): snake_case_ = self.block_out_channels snake_case_ = block_out_channels[0] * 4 if self.num_attention_heads is not None: raise ValueError( '''At the moment it is not possible to define the number of attention heads via 
`num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. snake_case_ = self.num_attention_heads or self.attention_head_dim # input snake_case_ = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time snake_case_ = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) snake_case_ = FlaxTimestepEmbedding(lowercase_ , dtype=self.dtype ) snake_case_ = self.only_cross_attention if isinstance(lowercase_ , lowercase_ ): snake_case_ = (only_cross_attention,) * len(self.down_block_types ) if isinstance(lowercase_ , lowercase_ ): snake_case_ = (num_attention_heads,) * len(self.down_block_types ) # down snake_case_ = [] snake_case_ = block_out_channels[0] for i, down_block_type in enumerate(self.down_block_types ): snake_case_ = output_channel snake_case_ = block_out_channels[i] snake_case_ = i == len(lowercase_ ) - 1 if down_block_type == "CrossAttnDownBlock2D": snake_case_ = FlaxCrossAttnDownBlockaD( in_channels=lowercase_ , out_channels=lowercase_ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , 
use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: snake_case_ = FlaxDownBlockaD( in_channels=lowercase_ , out_channels=lowercase_ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(lowercase_ ) snake_case_ = down_blocks # mid snake_case_ = FlaxUNetMidBlockaDCrossAttn( in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) # up snake_case_ = [] snake_case_ = list(reversed(lowercase_ ) ) snake_case_ = list(reversed(lowercase_ ) ) snake_case_ = list(reversed(lowercase_ ) ) snake_case_ = reversed_block_out_channels[0] for i, up_block_type in enumerate(self.up_block_types ): snake_case_ = output_channel snake_case_ = reversed_block_out_channels[i] snake_case_ = reversed_block_out_channels[min(i + 1 , len(lowercase_ ) - 1 )] snake_case_ = i == len(lowercase_ ) - 1 if up_block_type == "CrossAttnUpBlock2D": snake_case_ = FlaxCrossAttnUpBlockaD( in_channels=lowercase_ , out_channels=lowercase_ , prev_output_channel=lowercase_ , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: snake_case_ = FlaxUpBlockaD( in_channels=lowercase_ , out_channels=lowercase_ , prev_output_channel=lowercase_ , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , ) up_blocks.append(lowercase_ ) snake_case_ = output_channel snake_case_ 
= up_blocks # out snake_case_ = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) snake_case_ = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : Any , lowercase_ : int=None , lowercase_ : Any=None , lowercase_ : bool = True , lowercase_ : bool = False , ): # 1. time if not isinstance(lowercase_ , jnp.ndarray ): snake_case_ = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(lowercase_ , jnp.ndarray ) and len(timesteps.shape ) == 0: snake_case_ = timesteps.astype(dtype=jnp.floataa ) snake_case_ = jnp.expand_dims(lowercase_ , 0 ) snake_case_ = self.time_proj(lowercase_ ) snake_case_ = self.time_embedding(lowercase_ ) # 2. pre-process snake_case_ = jnp.transpose(lowercase_ , (0, 2, 3, 1) ) snake_case_ = self.conv_in(lowercase_ ) # 3. down snake_case_ = (sample,) for down_block in self.down_blocks: if isinstance(lowercase_ , lowercase_ ): snake_case_ ,snake_case_ = down_block(lowercase_ , lowercase_ , lowercase_ , deterministic=not train ) else: snake_case_ ,snake_case_ = down_block(lowercase_ , lowercase_ , deterministic=not train ) down_block_res_samples += res_samples if down_block_additional_residuals is not None: snake_case_ = () for down_block_res_sample, down_block_additional_residual in zip( lowercase_ , lowercase_ ): down_block_res_sample += down_block_additional_residual new_down_block_res_samples += (down_block_res_sample,) snake_case_ = new_down_block_res_samples # 4. mid snake_case_ = self.mid_block(lowercase_ , lowercase_ , lowercase_ , deterministic=not train ) if mid_block_additional_residual is not None: sample += mid_block_additional_residual # 5. 
up for up_block in self.up_blocks: snake_case_ = down_block_res_samples[-(self.layers_per_block + 1) :] snake_case_ = down_block_res_samples[: -(self.layers_per_block + 1)] if isinstance(lowercase_ , lowercase_ ): snake_case_ = up_block( lowercase_ , temb=lowercase_ , encoder_hidden_states=lowercase_ , res_hidden_states_tuple=lowercase_ , deterministic=not train , ) else: snake_case_ = up_block(lowercase_ , temb=lowercase_ , res_hidden_states_tuple=lowercase_ , deterministic=not train ) # 6. post-process snake_case_ = self.conv_norm_out(lowercase_ ) snake_case_ = nn.silu(lowercase_ ) snake_case_ = self.conv_out(lowercase_ ) snake_case_ = jnp.transpose(lowercase_ , (0, 3, 1, 2) ) if not return_dict: return (sample,) return FlaxUNetaDConditionOutput(sample=lowercase_ )
56
0
'''simple docstring''' def __lowercase ( __lowercase , __lowercase ) -> str: '''simple docstring''' if number < 0 or shift_amount < 0: raise ValueError("both inputs must be positive integers" ) _A = str(bin(__lowercase ) ) binary_number += "0" * shift_amount return binary_number def __lowercase ( __lowercase , __lowercase ) -> str: '''simple docstring''' if number < 0 or shift_amount < 0: raise ValueError("both inputs must be positive integers" ) _A = str(bin(__lowercase ) )[2:] if shift_amount >= len(__lowercase ): return "0b0" _A = binary_number[: len(__lowercase ) - shift_amount] return "0b" + shifted_binary_number def __lowercase ( __lowercase , __lowercase ) -> str: '''simple docstring''' if number >= 0: # Get binary representation of positive number _A = "0" + str(bin(__lowercase ) ).strip("-" )[2:] else: # Get binary (2's complement) representation of negative number _A = len(bin(__lowercase )[3:] ) # Find 2's complement of number _A = bin(abs(__lowercase ) - (1 << binary_number_length) )[3:] _A = ( "1" + "0" * (binary_number_length - len(__lowercase )) + binary_number ) if shift_amount >= len(__lowercase ): return "0b" + binary_number[0] * len(__lowercase ) return ( "0b" + binary_number[0] * shift_amount + binary_number[: len(__lowercase ) - shift_amount] ) if __name__ == "__main__": import doctest doctest.testmod()
79
'''simple docstring''' import glob import os import random from string import ascii_lowercase, digits import cva import numpy as np # Parrameters a : Dict = (720, 1280) # Height, Width a : Tuple = (0.4, 0.6) # if height or width lower than this scale, drop it. a : Dict = 1 / 100 a : str = '' a : Any = '' a : Optional[int] = '' a : List[str] = 250 def __magic_name__ ( ) -> None: '''simple docstring''' snake_case_ ,snake_case_ = get_dataset(__UpperCAmelCase, __UpperCAmelCase ) for index in range(__UpperCAmelCase ): snake_case_ = random.sample(range(len(__UpperCAmelCase ) ), 4 ) snake_case_ ,snake_case_ ,snake_case_ = update_image_and_anno( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, filter_scale=__UpperCAmelCase, ) # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' snake_case_ = random_chars(32 ) snake_case_ = path.split(os.sep )[-1].rsplit('''.''', 1 )[0] snake_case_ = F"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}" cva.imwrite(F"{file_root}.jpg", __UpperCAmelCase, [cva.IMWRITE_JPEG_QUALITY, 85] ) print(F"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}" ) snake_case_ = [] for anno in new_annos: snake_case_ = anno[3] - anno[1] snake_case_ = anno[4] - anno[2] snake_case_ = anno[1] + width / 2 snake_case_ = anno[2] + height / 2 snake_case_ = F"{anno[0]} {x_center} {y_center} {width} {height}" annos_list.append(__UpperCAmelCase ) with open(F"{file_root}.txt", '''w''' ) as outfile: outfile.write('''\n'''.join(line for line in annos_list ) ) def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> tuple[list, list]: '''simple docstring''' snake_case_ = [] snake_case_ = [] for label_file in glob.glob(os.path.join(__UpperCAmelCase, '''*.txt''' ) ): snake_case_ = label_file.split(os.sep )[-1].rsplit('''.''', 1 )[0] with open(__UpperCAmelCase ) as in_file: snake_case_ = in_file.readlines() snake_case_ = os.path.join(__UpperCAmelCase, F"{label_name}.jpg" ) snake_case_ = [] for obj_list in obj_lists: snake_case_ 
= obj_list.rstrip('''\n''' ).split(''' ''' ) snake_case_ = float(obj[1] ) - float(obj[3] ) / 2 snake_case_ = float(obj[2] ) - float(obj[4] ) / 2 snake_case_ = float(obj[1] ) + float(obj[3] ) / 2 snake_case_ = float(obj[2] ) + float(obj[4] ) / 2 boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] ) if not boxes: continue img_paths.append(__UpperCAmelCase ) labels.append(__UpperCAmelCase ) return img_paths, labels def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase = 0.0, ) -> tuple[list, list, str]: '''simple docstring''' snake_case_ = np.zeros([output_size[0], output_size[1], 3], dtype=np.uinta ) snake_case_ = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) snake_case_ = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) snake_case_ = int(scale_x * output_size[1] ) snake_case_ = int(scale_y * output_size[0] ) snake_case_ = [] snake_case_ = [] for i, index in enumerate(__UpperCAmelCase ): snake_case_ = all_img_list[index] path_list.append(__UpperCAmelCase ) snake_case_ = all_annos[index] snake_case_ = cva.imread(__UpperCAmelCase ) if i == 0: # top-left snake_case_ = cva.resize(__UpperCAmelCase, (divid_point_x, divid_point_y) ) snake_case_ = img for bbox in img_annos: snake_case_ = bbox[1] * scale_x snake_case_ = bbox[2] * scale_y snake_case_ = bbox[3] * scale_x snake_case_ = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 1: # top-right snake_case_ = cva.resize(__UpperCAmelCase, (output_size[1] - divid_point_x, divid_point_y) ) snake_case_ = img for bbox in img_annos: snake_case_ = scale_x + bbox[1] * (1 - scale_x) snake_case_ = bbox[2] * scale_y snake_case_ = scale_x + bbox[3] * (1 - scale_x) snake_case_ = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 2: # bottom-left snake_case_ = cva.resize(__UpperCAmelCase, (divid_point_x, output_size[0] - divid_point_y) ) snake_case_ = img for bbox 
in img_annos: snake_case_ = bbox[1] * scale_x snake_case_ = scale_y + bbox[2] * (1 - scale_y) snake_case_ = bbox[3] * scale_x snake_case_ = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) else: # bottom-right snake_case_ = cva.resize( __UpperCAmelCase, (output_size[1] - divid_point_x, output_size[0] - divid_point_y) ) snake_case_ = img for bbox in img_annos: snake_case_ = scale_x + bbox[1] * (1 - scale_x) snake_case_ = scale_y + bbox[2] * (1 - scale_y) snake_case_ = scale_x + bbox[3] * (1 - scale_x) snake_case_ = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) # Remove bounding box small than scale of filter if filter_scale > 0: snake_case_ = [ anno for anno in new_anno if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2]) ] return output_img, new_anno, path_list[0] def __magic_name__ ( __UpperCAmelCase ) -> str: '''simple docstring''' assert number_char > 1, "The number of character should greater than 1" snake_case_ = ascii_lowercase + digits return "".join(random.choice(__UpperCAmelCase ) for _ in range(__UpperCAmelCase ) ) if __name__ == "__main__": main() print('DONE ✅')
56
0
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowercase_ ( a__ , unittest.TestCase ): __UpperCAmelCase = DiTPipeline __UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS __UpperCAmelCase = PipelineTesterMixin.required_optional_params - { 'latents', 'num_images_per_prompt', 'callback', 'callback_steps', } __UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS __UpperCAmelCase = False def __a ( self ): torch.manual_seed(0 ) UpperCamelCase__ = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=a , activation_fn="gelu-approximate" , num_embeds_ada_norm=10_00 , norm_type="ada_norm_zero" , norm_elementwise_affine=a , ) UpperCamelCase__ = AutoencoderKL() UpperCamelCase__ = DDIMScheduler() UpperCamelCase__ = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler} return components def __a ( self , a , a=0 ): if str(a ).startswith("mps" ): UpperCamelCase__ = torch.manual_seed(a ) else: UpperCamelCase__ = torch.Generator(device=a ).manual_seed(a ) UpperCamelCase__ = { "class_labels": [1], "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def __a ( self ): UpperCamelCase__ = "cpu" UpperCamelCase__ = self.get_dummy_components() UpperCamelCase__ = self.pipeline_class(**a ) pipe.to(a ) pipe.set_progress_bar_config(disable=a ) UpperCamelCase__ = self.get_dummy_inputs(a ) 
UpperCamelCase__ = pipe(**a ).images UpperCamelCase__ = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) UpperCamelCase__ = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] ) UpperCamelCase__ = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(a , 1e-3 ) def __a ( self ): self._test_inference_batch_single_identical(relax_max_difference=a , expected_max_diff=1e-3 ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def __a ( self ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @require_torch_gpu @slow class lowercase_ ( unittest.TestCase ): def __a ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self ): UpperCamelCase__ = torch.manual_seed(0 ) UpperCamelCase__ = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" ) pipe.to("cuda" ) UpperCamelCase__ = ["vase", "umbrella", "white shark", "white wolf"] UpperCamelCase__ = pipe.get_label_ids(a ) UpperCamelCase__ = pipe(a , generator=a , num_inference_steps=40 , output_type="np" ).images for word, image in zip(a , a ): UpperCamelCase__ = load_numpy( f'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' ) assert np.abs((expected_image - image).max() ) < 1e-2 def __a ( self ): UpperCamelCase__ = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" ) UpperCamelCase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to("cuda" ) UpperCamelCase__ = ["vase", "umbrella"] UpperCamelCase__ = pipe.get_label_ids(a ) UpperCamelCase__ = torch.manual_seed(0 ) UpperCamelCase__ = pipe(a , generator=a , num_inference_steps=25 , output_type="np" ).images for word, image in zip(a , a ): UpperCamelCase__ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" f'''/dit/{word}_512.npy''' ) 
assert np.abs((expected_image - image).max() ) < 1e-1
80
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class a : @staticmethod def A_ ( *lowercase_ : int , **lowercase_ : str ): pass @is_pipeline_test @require_vision @require_timm @require_torch class a ( unittest.TestCase ): snake_case_ = MODEL_FOR_OBJECT_DETECTION_MAPPING def A_ ( self : Any , lowercase_ : List[Any] , lowercase_ : Optional[int] , lowercase_ : List[str] ): snake_case_ = ObjectDetectionPipeline(model=lowercase_ , image_processor=lowercase_ ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def A_ ( self : Dict , lowercase_ : List[Any] , lowercase_ : int ): snake_case_ = object_detector('''./tests/fixtures/tests_samples/COCO/000000039769.png''' , threshold=0.0 ) self.assertGreater(len(lowercase_ ) , 0 ) for detected_object in outputs: self.assertEqual( lowercase_ , { '''score''': ANY(lowercase_ ), '''label''': ANY(lowercase_ ), '''box''': {'''xmin''': ANY(lowercase_ ), '''ymin''': ANY(lowercase_ ), '''xmax''': ANY(lowercase_ ), '''ymax''': ANY(lowercase_ )}, } , ) import datasets snake_case_ = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' ) snake_case_ = [ Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ), '''http://images.cocodataset.org/val2017/000000039769.jpg''', # RGBA dataset[0]['''file'''], # LA dataset[1]['''file'''], # L dataset[2]['''file'''], ] snake_case_ = object_detector(lowercase_ , threshold=0.0 ) self.assertEqual(len(lowercase_ ) , len(lowercase_ ) ) for outputs in batch_outputs: self.assertGreater(len(lowercase_ ) , 0 
) for detected_object in outputs: self.assertEqual( lowercase_ , { '''score''': ANY(lowercase_ ), '''label''': ANY(lowercase_ ), '''box''': {'''xmin''': ANY(lowercase_ ), '''ymin''': ANY(lowercase_ ), '''xmax''': ANY(lowercase_ ), '''ymax''': ANY(lowercase_ )}, } , ) @require_tf @unittest.skip('''Object detection not implemented in TF''' ) def A_ ( self : int ): pass @require_torch def A_ ( self : Tuple ): snake_case_ = '''hf-internal-testing/tiny-detr-mobilenetsv3''' snake_case_ = AutoModelForObjectDetection.from_pretrained(lowercase_ ) snake_case_ = AutoFeatureExtractor.from_pretrained(lowercase_ ) snake_case_ = ObjectDetectionPipeline(model=lowercase_ , feature_extractor=lowercase_ ) snake_case_ = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=0.0 ) self.assertEqual( nested_simplify(lowercase_ , decimals=4 ) , [ {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}}, {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}}, ] , ) snake_case_ = object_detector( [ '''http://images.cocodataset.org/val2017/000000039769.jpg''', '''http://images.cocodataset.org/val2017/000000039769.jpg''', ] , threshold=0.0 , ) self.assertEqual( nested_simplify(lowercase_ , decimals=4 ) , [ [ {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}}, {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}}, ], [ {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}}, {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}}, ], ] , ) @require_torch @slow def A_ ( self : Optional[int] ): snake_case_ = 
'''facebook/detr-resnet-50''' snake_case_ = AutoModelForObjectDetection.from_pretrained(lowercase_ ) snake_case_ = AutoFeatureExtractor.from_pretrained(lowercase_ ) snake_case_ = ObjectDetectionPipeline(model=lowercase_ , feature_extractor=lowercase_ ) snake_case_ = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' ) self.assertEqual( nested_simplify(lowercase_ , decimals=4 ) , [ {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}}, {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}}, {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}}, {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}}, ] , ) snake_case_ = object_detector( [ '''http://images.cocodataset.org/val2017/000000039769.jpg''', '''http://images.cocodataset.org/val2017/000000039769.jpg''', ] ) self.assertEqual( nested_simplify(lowercase_ , decimals=4 ) , [ [ {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}}, {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}}, {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}}, {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}}, ], [ {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 
40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}}, {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}}, {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}}, {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}}, ], ] , ) @require_torch @slow def A_ ( self : Tuple ): snake_case_ = '''facebook/detr-resnet-50''' snake_case_ = pipeline('''object-detection''' , model=lowercase_ ) snake_case_ = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' ) self.assertEqual( nested_simplify(lowercase_ , decimals=4 ) , [ {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}}, {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}}, {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}}, {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}}, ] , ) snake_case_ = object_detector( [ '''http://images.cocodataset.org/val2017/000000039769.jpg''', '''http://images.cocodataset.org/val2017/000000039769.jpg''', ] ) self.assertEqual( nested_simplify(lowercase_ , decimals=4 ) , [ [ {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}}, {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, 
'''ymax''': 187}}, {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}}, {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}}, ], [ {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}}, {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}}, {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}}, {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}}, ], ] , ) @require_torch @slow def A_ ( self : str ): snake_case_ = 0.9985 snake_case_ = '''facebook/detr-resnet-50''' snake_case_ = pipeline('''object-detection''' , model=lowercase_ ) snake_case_ = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=lowercase_ ) self.assertEqual( nested_simplify(lowercase_ , decimals=4 ) , [ {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}}, ] , ) @require_torch @require_pytesseract @slow def A_ ( self : Dict ): snake_case_ = '''Narsil/layoutlmv3-finetuned-funsd''' snake_case_ = 0.9993 snake_case_ = pipeline('''object-detection''' , model=lowercase_ , threshold=lowercase_ ) snake_case_ = object_detector( 
'''https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png''' ) self.assertEqual( nested_simplify(lowercase_ , decimals=4 ) , [ {'''score''': 0.9993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}}, {'''score''': 0.9993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}}, ] , )
56
0
"""simple docstring""" def _A ( lowercase , lowercase ): """simple docstring""" if b == 0: return 1 if (b % 2) == 0: return actual_power(lowercase , int(b / 2 ) ) * actual_power(lowercase , int(b / 2 ) ) else: return a * actual_power(lowercase , int(b / 2 ) ) * actual_power(lowercase , int(b / 2 ) ) def _A ( lowercase , lowercase ): """simple docstring""" if b < 0: return 1 / actual_power(lowercase , lowercase ) return actual_power(lowercase , lowercase ) if __name__ == "__main__": print(power(-2, -3))
81
'''simple docstring''' import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class a : def __init__( self : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Any=13 , lowercase_ : Optional[Any]=7 , lowercase_ : Optional[Any]=True , lowercase_ : Dict=True , lowercase_ : Tuple=False , lowercase_ : Optional[Any]=True , lowercase_ : Any=99 , lowercase_ : Union[str, Any]=64 , lowercase_ : str=5 , lowercase_ : int=4 , lowercase_ : List[Any]=64 , lowercase_ : Dict="gelu" , lowercase_ : Optional[int]=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : Tuple=512 , lowercase_ : List[Any]=16 , lowercase_ : str=2 , lowercase_ : List[str]=0.02 , lowercase_ : Optional[Any]=3 , lowercase_ : Optional[Any]=4 , lowercase_ : List[Any]=None , ): snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_input_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = type_sequence_label_size snake_case_ = initializer_range snake_case_ = num_labels snake_case_ = num_choices snake_case_ = scope def A_ ( self : List[str] ): return MPNetConfig.from_pretrained('''microsoft/mpnet-base''' ) def A_ ( self : str ): 
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_input_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = None snake_case_ = None snake_case_ = None if self.use_labels: snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case_ = ids_tensor([self.batch_size] , self.num_choices ) snake_case_ = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def A_ ( self : Tuple ): return MPNetConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def A_ ( self : Any , lowercase_ : Dict , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Optional[int] ): snake_case_ = MPNetModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() snake_case_ = model(lowercase_ , lowercase_ ) snake_case_ = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def A_ ( self : str , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : str , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : Optional[int] ): snake_case_ = MPNetForQuestionAnswering(config=lowercase_ ) model.to(lowercase_ ) model.eval() snake_case_ = model( lowercase_ , attention_mask=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , ) 
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A_ ( self : Tuple , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Any ): snake_case_ = self.num_labels snake_case_ = MPNetForSequenceClassification(lowercase_ ) model.to(lowercase_ ) model.eval() snake_case_ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A_ ( self : Any , lowercase_ : Any , lowercase_ : str , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : Dict ): snake_case_ = self.num_choices snake_case_ = MPNetForMultipleChoice(config=lowercase_ ) model.to(lowercase_ ) model.eval() snake_case_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case_ = model( lowercase_ , attention_mask=lowercase_ , labels=lowercase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A_ ( self : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : int , lowercase_ : List[str] ): snake_case_ = self.num_labels snake_case_ = MPNetForTokenClassification(config=lowercase_ ) model.to(lowercase_ ) model.eval() snake_case_ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A_ ( self : Union[str, Any] ): snake_case_ = self.prepare_config_and_inputs() ((snake_case_) ,(snake_case_) ,(snake_case_) ,(snake_case_) ,(snake_case_) ,(snake_case_)) = config_and_inputs snake_case_ = {'''input_ids''': input_ids, 
'''attention_mask''': input_mask} return config, inputs_dict @require_torch class a ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): snake_case_ = ( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) if is_torch_available() else () ) snake_case_ = ( { "feature-extraction": MPNetModel, "fill-mask": MPNetForMaskedLM, "question-answering": MPNetForQuestionAnswering, "text-classification": MPNetForSequenceClassification, "token-classification": MPNetForTokenClassification, "zero-shot": MPNetForSequenceClassification, } if is_torch_available() else {} ) snake_case_ = False snake_case_ = True def A_ ( self : Tuple ): snake_case_ = MPNetModelTester(self ) snake_case_ = ConfigTester(self , config_class=lowercase_ , hidden_size=37 ) def A_ ( self : Union[str, Any] ): self.config_tester.run_common_tests() def A_ ( self : Tuple ): snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*lowercase_ ) def A_ ( self : List[Any] ): snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*lowercase_ ) def A_ ( self : List[Any] ): snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*lowercase_ ) def A_ ( self : Union[str, Any] ): snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*lowercase_ ) def A_ ( self : Tuple ): snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*lowercase_ ) @require_torch class a ( unittest.TestCase ): @slow def A_ ( self : List[Any] ): snake_case_ = MPNetModel.from_pretrained('''microsoft/mpnet-base''' ) snake_case_ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) snake_case_ = model(lowercase_ 
)[0] snake_case_ = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , lowercase_ ) snake_case_ = torch.tensor( [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] ) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase_ , atol=1e-4 ) )
56
0
from collections import defaultdict from math import ceil, sqrt def _UpperCAmelCase ( snake_case = 1_00_00_00 , snake_case = 10 ): """simple docstring""" _lowerCAmelCase = defaultdict(snake_case ) for outer_width in range(3 , (t_limit // 4) + 2 ): if outer_width * outer_width > t_limit: _lowerCAmelCase = max( ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 ) else: _lowerCAmelCase = 1 hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2 for hole_width in range(snake_case , outer_width - 1 , 2 ): count[outer_width * outer_width - hole_width * hole_width] += 1 return sum(1 for n in count.values() if 1 <= n <= 10 ) if __name__ == "__main__": print(f"{solution() = }")
82
'''simple docstring''' import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class a ( _lowerCamelCase ): def A_ ( self : str ): snake_case_ = tempfile.mkdtemp() snake_case_ = 8 # DPR tok snake_case_ = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] snake_case_ = os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) os.makedirs(lowercase_ , exist_ok=lowercase_ ) snake_case_ = os.path.join(lowercase_ , DPR_VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) # BART tok snake_case_ = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] 
snake_case_ = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) ) snake_case_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] snake_case_ = {'''unk_token''': '''<unk>'''} snake_case_ = os.path.join(self.tmpdirname , '''bart_tokenizer''' ) os.makedirs(lowercase_ , exist_ok=lowercase_ ) snake_case_ = os.path.join(lowercase_ , BART_VOCAB_FILES_NAMES['''vocab_file'''] ) snake_case_ = os.path.join(lowercase_ , BART_VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(lowercase_ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(lowercase_ ) ) def A_ ( self : Union[str, Any] ): return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def A_ ( self : Union[str, Any] ): return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def A_ ( self : int ): return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) ) def A_ ( self : str ): shutil.rmtree(self.tmpdirname ) def A_ ( self : str ): snake_case_ = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def A_ ( self : str ): snake_case_ = self.get_dummy_dataset() snake_case_ = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset: snake_case_ = dataset snake_case_ = RagRetriever( lowercase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , 
generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def A_ ( self : str , lowercase_ : bool ): snake_case_ = self.get_dummy_dataset() snake_case_ = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , ) if from_disk: snake_case_ = os.path.join(self.tmpdirname , '''dataset''' ) snake_case_ = os.path.join(self.tmpdirname , '''index.faiss''' ) dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) ) dataset.drop_index('''embeddings''' ) dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) ) del dataset snake_case_ = RagRetriever( lowercase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: snake_case_ = RagRetriever( lowercase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , lowercase_ ) , ) return retriever def A_ ( self : Tuple ): snake_case_ = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT ) snake_case_ = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' ) dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' ) pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) ) snake_case_ = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' ) snake_case_ = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset} pickle.dump(lowercase_ , open(lowercase_ , '''wb''' ) ) snake_case_ = RagConfig( 
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , ) snake_case_ = RagRetriever( lowercase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def A_ ( self : Optional[Any] ): snake_case_ = 1 snake_case_ = self.get_dummy_canonical_hf_index_retriever() snake_case_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case_ ,snake_case_ ,snake_case_ = retriever.retrieve(lowercase_ , n_docs=lowercase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(lowercase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , lowercase_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def A_ ( self : str ): snake_case_ = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset: snake_case_ = self.get_dummy_dataset() retriever.save_pretrained(lowercase_ ) snake_case_ = RagRetriever.from_pretrained(lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) snake_case_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case_ = retriever.retrieve(lowercase_ , n_docs=1 ) self.assertTrue(out is not None ) def A_ ( self : int ): snake_case_ = 1 snake_case_ = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ ) snake_case_ = np.array( 
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case_ ,snake_case_ ,snake_case_ = retriever.retrieve(lowercase_ , n_docs=lowercase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(lowercase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , lowercase_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def A_ ( self : int ): snake_case_ = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(lowercase_ ) snake_case_ = RagRetriever.from_pretrained(lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) snake_case_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case_ = retriever.retrieve(lowercase_ , n_docs=1 ) self.assertTrue(out is not None ) def A_ ( self : str ): snake_case_ = 1 snake_case_ = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ ) snake_case_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case_ ,snake_case_ ,snake_case_ = retriever.retrieve(lowercase_ , n_docs=lowercase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(lowercase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , lowercase_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc 
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def A_ ( self : Any ): snake_case_ = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(lowercase_ ) snake_case_ = RagRetriever.from_pretrained(lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) snake_case_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case_ = retriever.retrieve(lowercase_ , n_docs=1 ) self.assertTrue(out is not None ) def A_ ( self : Any ): snake_case_ = 1 snake_case_ = self.get_dummy_legacy_index_retriever() snake_case_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case_ ,snake_case_ ,snake_case_ = retriever.retrieve(lowercase_ , n_docs=lowercase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(lowercase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''text'''] ) , lowercase_ ) self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def A_ ( self : int ): snake_case_ = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(lowercase_ ) snake_case_ = RagRetriever.from_pretrained(lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) snake_case_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case_ = retriever.retrieve(lowercase_ , n_docs=1 ) self.assertTrue(out is not None ) @require_torch 
@require_tokenizers @require_sentencepiece def A_ ( self : List[str] ): import torch snake_case_ = 1 snake_case_ = self.get_dummy_canonical_hf_index_retriever() snake_case_ = [[5, 7], [10, 11]] snake_case_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case_ = retriever(lowercase_ , lowercase_ , prefix=retriever.config.generator.prefix , n_docs=lowercase_ ) snake_case_ ,snake_case_ ,snake_case_ = ( out['''context_input_ids'''], out['''context_attention_mask'''], out['''retrieved_doc_embeds'''], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(lowercase_ , lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) self.assertIsInstance(lowercase_ , np.ndarray ) snake_case_ = retriever( lowercase_ , lowercase_ , prefix=retriever.config.generator.prefix , n_docs=lowercase_ , return_tensors='''pt''' , ) snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ = ( # noqa: F841 out['''context_input_ids'''], out['''context_attention_mask'''], out['''retrieved_doc_embeds'''], out['''doc_ids'''], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(lowercase_ , torch.Tensor ) self.assertIsInstance(lowercase_ , torch.Tensor ) self.assertIsInstance(lowercase_ , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def A_ ( self : Tuple ): snake_case_ = self.get_dpr_ctx_encoder_tokenizer() snake_case_ = 1 snake_case_ = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ ) retriever.set_ctx_encoder_tokenizer(lowercase_ ) snake_case_ = [[5, 7], [10, 11]] snake_case_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case_ = retriever(lowercase_ , lowercase_ , prefix=retriever.config.generator.prefix , n_docs=lowercase_ ) self.assertEqual( len(lowercase_ ) , 6 ) # check whether the retriever output 
consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , lowercase_ ) # check for doc token related keys in dictionary.
56
0
'''simple docstring''' import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler snake_case_ : int = 16 snake_case_ : int = 32 def A__ ( UpperCAmelCase_ , UpperCAmelCase_ = 1_6 , UpperCAmelCase_ = "bert-base-cased" ): _UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCAmelCase_ ) _UpperCamelCase : List[Any] = load_dataset('glue' , 'mrpc' ) def tokenize_function(UpperCAmelCase_ ): # max_length=None => use the model max length (it's actually the default) _UpperCamelCase : Dict = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset _UpperCamelCase : int = datasets.map( UpperCAmelCase_ , batched=UpperCAmelCase_ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=UpperCAmelCase_ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _UpperCamelCase : Optional[Any] = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(UpperCAmelCase_ ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(UpperCAmelCase_ , padding='max_length' , max_length=1_2_8 , return_tensors='pt' ) return tokenizer.pad(UpperCAmelCase_ , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. 
_UpperCamelCase : Any = DataLoader( tokenized_datasets['train'] , shuffle=UpperCAmelCase_ , collate_fn=UpperCAmelCase_ , batch_size=UpperCAmelCase_ ) _UpperCamelCase : int = DataLoader( tokenized_datasets['validation'] , shuffle=UpperCAmelCase_ , collate_fn=UpperCAmelCase_ , batch_size=UpperCAmelCase_ ) return train_dataloader, eval_dataloader def A__ ( UpperCAmelCase_ , UpperCAmelCase_ ): # Initialize accelerator _UpperCamelCase : List[Any] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _UpperCamelCase : Union[str, Any] = config['lr'] _UpperCamelCase : Optional[Any] = int(config['num_epochs'] ) _UpperCamelCase : str = int(config['seed'] ) _UpperCamelCase : List[Any] = int(config['batch_size'] ) _UpperCamelCase : int = args.model_name_or_path set_seed(UpperCAmelCase_ ) _UpperCamelCase , _UpperCamelCase : Dict = get_dataloaders(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _UpperCamelCase : str = AutoModelForSequenceClassification.from_pretrained(UpperCAmelCase_ , return_dict=UpperCAmelCase_ ) # Instantiate optimizer _UpperCamelCase : Tuple = ( AdamW if accelerator.state.deepspeed_plugin is None or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) _UpperCamelCase : Union[str, Any] = optimizer_cls(params=model.parameters() , lr=UpperCAmelCase_ ) if accelerator.state.deepspeed_plugin is not None: _UpperCamelCase : int = accelerator.state.deepspeed_plugin.deepspeed_config[ 'gradient_accumulation_steps' ] else: _UpperCamelCase : List[Any] = 1 _UpperCamelCase : str = (len(UpperCAmelCase_ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): _UpperCamelCase : Union[str, Any] = get_linear_schedule_with_warmup( 
optimizer=UpperCAmelCase_ , num_warmup_steps=0 , num_training_steps=UpperCAmelCase_ , ) else: _UpperCamelCase : str = DummyScheduler(UpperCAmelCase_ , total_num_steps=UpperCAmelCase_ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Dict = accelerator.prepare( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # We need to keep track of how many total steps we have iterated over _UpperCamelCase : str = 0 # We also need to keep track of the stating epoch so files are named properly _UpperCamelCase : int = 0 # Now we train the model _UpperCamelCase : Any = evaluate.load('glue' , 'mrpc' ) _UpperCamelCase : Union[str, Any] = 0 _UpperCamelCase : str = {} for epoch in range(UpperCAmelCase_ , UpperCAmelCase_ ): model.train() for step, batch in enumerate(UpperCAmelCase_ ): _UpperCamelCase : Dict = model(**UpperCAmelCase_ ) _UpperCamelCase : Dict = outputs.loss _UpperCamelCase : List[str] = loss / gradient_accumulation_steps accelerator.backward(UpperCAmelCase_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() _UpperCamelCase : int = 0 for step, batch in enumerate(UpperCAmelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): _UpperCamelCase : Any = model(**UpperCAmelCase_ ) _UpperCamelCase : int = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times _UpperCamelCase , _UpperCamelCase : List[str] = accelerator.gather( (predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(UpperCAmelCase_ ) - 1: _UpperCamelCase : Dict = predictions[: len(eval_dataloader.dataset ) - samples_seen] _UpperCamelCase : Union[str, Any] = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=UpperCAmelCase_ , references=UpperCAmelCase_ , ) _UpperCamelCase : Any = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'epoch {epoch}:' , UpperCAmelCase_ ) _UpperCamelCase : Optional[Any] = eval_metric['accuracy'] if best_performance < eval_metric["accuracy"]: _UpperCamelCase : Dict = eval_metric['accuracy'] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), f'Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}' accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f: json.dump(UpperCAmelCase_ , UpperCAmelCase_ ) def A__ ( ): _UpperCamelCase : Union[str, Any] = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' ) parser.add_argument( '--model_name_or_path' , type=UpperCAmelCase_ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=UpperCAmelCase_ , ) parser.add_argument( '--output_dir' , type=UpperCAmelCase_ , default='.' , help='Optional save directory where all checkpoint folders will be stored. 
Default is the current working directory.' , ) parser.add_argument( '--performance_lower_bound' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' , ) parser.add_argument( '--num_epochs' , type=UpperCAmelCase_ , default=3 , help='Number of train epochs.' , ) _UpperCamelCase : List[Any] = parser.parse_args() _UpperCamelCase : Optional[int] = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 4_2, 'batch_size': 1_6} training_function(UpperCAmelCase_ , UpperCAmelCase_ ) if __name__ == "__main__": main()
83
'''simple docstring''' import os import re import warnings from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_ta import TaTokenizer else: a : Dict = None a : List[Any] = logging.get_logger(__name__) a : List[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} a : str = { 'vocab_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model', 't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model', 't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model', }, 'tokenizer_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json', 't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json', 't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json', }, } # TODO(PVP) - this should be removed in Transformers v5 a : List[Any] = { 't5-small': 512, 't5-base': 512, 't5-large': 512, 't5-3b': 512, 't5-11b': 512, } class a ( _lowerCamelCase ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = ["input_ids", "attention_mask"] snake_case_ = TaTokenizer snake_case_ = [] def __init__( self : List[Any] , lowercase_ : int=None , lowercase_ : Dict=None , lowercase_ : Dict="</s>" , lowercase_ : List[Any]="<unk>" , lowercase_ : int="<pad>" , lowercase_ : int=100 , lowercase_ : List[Any]=None , **lowercase_ : List[str] , ): # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: 
snake_case_ = [F"<extra_id_{i}>" for i in range(lowercase_ )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra special tokens snake_case_ = len(set(filter(lambda lowercase_ : bool('''extra_id_''' in str(lowercase_ ) ) , lowercase_ ) ) ) if extra_tokens != extra_ids: raise ValueError( F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are" ''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids''' ''' tokens''' ) super().__init__( lowercase_ , tokenizer_file=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , extra_ids=lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ , ) snake_case_ = vocab_file snake_case_ = False if not self.vocab_file else True snake_case_ = extra_ids @staticmethod def A_ ( lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : int ): if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes: snake_case_ = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( '''This tokenizer was incorrectly instantiated with a model max length of''' F" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this" ''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with''' ''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on''' F" {pretrained_model_name_or_path} automatically truncating your input to" F" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences" F" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with" ''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please''' ''' 
instantiate this tokenizer with `model_max_length` set to your preferred value.''' , lowercase_ , ) return max_model_length def A_ ( self : Optional[Any] , lowercase_ : str , lowercase_ : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(lowercase_ ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return snake_case_ = os.path.join( lowercase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ): copyfile(self.vocab_file , lowercase_ ) logger.info(F"Copy vocab file to {out_vocab_file}" ) return (out_vocab_file,) def A_ ( self : Optional[int] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ): snake_case_ = token_ids_a + [self.eos_token_id] if token_ids_a is None: return self.prefix_tokens + token_ids_a else: snake_case_ = token_ids_a + [self.eos_token_id] return self.prefix_tokens + token_ids_a + token_ids_a def A_ ( self : int , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ): snake_case_ = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def A_ ( self : Dict ): return list( set(filter(lambda lowercase_ : bool(re.search(R'''<extra_id_\d+>''' , lowercase_ ) ) is not None , self.additional_special_tokens ) ) ) def A_ ( self : Any ): return [self.convert_tokens_to_ids(lowercase_ ) for token in self.get_sentinel_tokens()]
56
0
"""simple docstring""" from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
84
"""Minimax over a perfect binary tree of leaf scores.

Bug fix: the two functions were both named ``__magic_name__`` (the second
shadowed the first), the recursion called an undefined ``minimax`` and the
``__main__`` guard called an undefined ``main``.  Names are restored so the
module actually runs.
"""
import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal score reachable from ``node_index`` at ``depth``.

    Args:
        depth: current depth in the tree (root is 0).
        node_index: index of the current node within its level.
        is_max: True when the current player maximises, False when minimising.
        scores: leaf scores; must be non-empty.
        height: depth at which the leaves live (``log2(len(scores))``).

    Raises:
        ValueError: if ``depth`` is negative or ``scores`` is empty.
    """
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    # Leaf level reached: the node's value is its score.
    if depth == height:
        return scores[node_index]
    if is_max:
        # Maximising player picks the better of the two children (which minimise).
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    # Minimising player picks the worse of the two children (which maximise).
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    """Demo: print the optimal value for a fixed score vector."""
    scores = [90, 23, 6, 33, 21, 65, 123, 3_4423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
56
0
"""Project Euler 107: minimum network saving via Prim's algorithm.

Bug fixes relative to the original:
- both methods of the graph class were named ``lowerCAmelCase__`` so
  ``add_edge`` never existed and ``prims_algorithm`` was unreachable;
- ``prims_algorithm`` referenced an undefined name when adding the chosen edge;
- the adjacency-matrix loader reused one loop variable for both indices;
- the ``__main__`` guard called an undefined ``solution``.
"""
import os
from collections.abc import Mapping

# An undirected edge is stored as an ordered (low, high) vertex pair.
_SCREAMING_SNAKE_CASE = tuple[int, int]


class _snake_case:
    """Weighted undirected graph supporting Prim's minimum-spanning-tree algorithm."""

    def __init__(self, vertices: set, edges: Mapping) -> None:
        # Normalise every edge key to (min, max) so (a, b) and (b, a) collide.
        self.vertices = vertices
        self.edges = {(min(edge), max(edge)): weight for edge, weight in edges.items()}

    def add_edge(self, edge: tuple, weight: int) -> None:
        """Insert ``edge`` with ``weight``, adding its endpoints to the vertex set."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> "_snake_case":
        """Return a minimum spanning tree of this graph as a new graph.

        Assumes the graph is connected; otherwise the loop never terminates.
        """
        subgraph = _snake_case({min(self.vertices)}, {})
        while len(subgraph.vertices) < len(self.vertices):
            # Sentinel strictly larger than any real weight.
            min_weight = max(self.edges.values()) + 1
            min_edge = None
            for edge, weight in self.edges.items():
                # XOR: exactly one endpoint already inside the growing tree.
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    """Return the maximum saving achievable by reducing the network to its MST.

    The file is a comma-separated adjacency matrix with '-' marking "no edge".
    """
    script_dir = os.path.abspath(os.path.dirname(filename))
    filepath = os.path.join(script_dir, filename)
    with open(filepath) as f:
        data = f.read().strip().split("\n")
    adjacency_matrix = [line.split(",") for line in data]

    # Only read the strict lower triangle: each undirected edge once.
    edges: dict = {}
    for row in range(1, len(adjacency_matrix)):
        for col in range(row):
            if adjacency_matrix[row][col] != "-":
                edges[(col, row)] = int(adjacency_matrix[row][col])

    graph = _snake_case(set(range(len(adjacency_matrix))), edges)
    subgraph = graph.prims_algorithm()
    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())
    return initial_total - optimal_total


# Backward-compatible alias for the obfuscated public name.
UpperCamelCase_ = solution

if __name__ == "__main__":
    print(F"{solution() = }")
85
"""Convert a fairseq Speech2Text checkpoint to the Transformers format.

Bug fixes relative to the original: all four functions were named
``__magic_name__`` (each shadowing the last), ``rename_keys`` threw its
renamed entries away into a local instead of writing them back, and both
``convert`` and the ``__main__`` guard called undefined names.
"""
import argparse

import torch
from torch import nn

from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration


def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping entries that have no Transformers counterpart (in place)."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_keys(s_dict):
    """Rename fairseq parameter names to the Transformers naming scheme (in place)."""
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    """Build a bias-free Linear layer sharing the embedding's weight (weight tying)."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_sat_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Load a fairseq checkpoint, remap its weights and save a Transformers model."""
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    args = mam_aaa["args"]
    state_dict = mam_aaa["model"]
    # Kept aside: reused below when embeddings are not tied.
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]

    config = SpeechaTextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = SpeechaTextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    # Only the (recomputed) sinusoidal position tables may legitimately be absent.
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
56
0
"""Low-level terminal cursor helpers (ANSI escape sequences).

Bug fixes relative to the original: every function was named
``__lowerCAmelCase`` (each shadowing the previous), the helpers called an
undefined ``forceWrite``/``reset_cursor``, and both enum members were named
``A_``.  Names are restored so the helpers are callable.
"""
import enum
import shutil
import sys

# Current terminal width; height is unused.
TERMINAL_WIDTH, _ = shutil.get_terminal_size()

# Final byte of the ANSI CSI cursor-movement sequences.
CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class A__(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    """Write ``content`` (+ ``end``) to stdout and flush immediately."""
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    """Write ``content`` wrapped in the ANSI color code ``color``."""
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    """Return the cursor to the start of the current line."""
    forceWrite("\r")


def move_cursor(num_lines, direction):
    """Move the cursor ``num_lines`` in ``direction`` ('up'/'down'/'left'/'right')."""
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    """Blank out the current line and return the cursor to its start."""
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    """Draw a full-width horizontal rule on the current line."""
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
86
"""Dummy placeholder object raising at use time when its backends are absent.

Bug fix: the metaclass was the undefined name ``_lowerCamelCase`` (it must be
``DummyObject``), and both classmethods were named ``A_`` so the second
shadowed the first; the conventional ``from_config``/``from_pretrained``
names are restored.
"""
from ..utils import DummyObject, requires_backends


class a(metaclass=DummyObject):
    # Backends that must be installed before this object becomes usable.
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
56
0
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''', # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class snake_case_ ( __A ): __A : List[Any] = "blenderbot-small" __A : Tuple = ["past_key_values"] __A : Union[str, Any] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self : Any , lowercase_ : Any=5_02_65 , lowercase_ : Optional[Any]=5_12 , lowercase_ : Optional[int]=8 , lowercase_ : Tuple=20_48 , lowercase_ : Any=16 , lowercase_ : Optional[int]=8 , lowercase_ : Any=20_48 , lowercase_ : Any=16 , lowercase_ : Tuple=0.0 , lowercase_ : Optional[Any]=0.0 , lowercase_ : Union[str, Any]=True , lowercase_ : Optional[Any]=True , lowercase_ : int="gelu" , lowercase_ : str=5_12 , lowercase_ : str=0.1 , lowercase_ : Optional[int]=0.0 , lowercase_ : Union[str, Any]=0.0 , lowercase_ : Union[str, Any]=0.02 , lowercase_ : str=1 , lowercase_ : int=False , lowercase_ : Optional[int]=0 , lowercase_ : Tuple=1 , lowercase_ : int=2 , lowercase_ : List[str]=2 , **lowercase_ : Tuple , ) -> Union[str, Any]: lowercase__ : Any = vocab_size lowercase__ : int = max_position_embeddings lowercase__ : Optional[Any] = d_model lowercase__ : List[str] = encoder_ffn_dim lowercase__ : List[str] = encoder_layers lowercase__ : List[Any] = encoder_attention_heads lowercase__ : List[str] = decoder_ffn_dim lowercase__ : Optional[Any] = decoder_layers lowercase__ : Union[str, Any] = decoder_attention_heads 
lowercase__ : int = dropout lowercase__ : Optional[int] = attention_dropout lowercase__ : Dict = activation_dropout lowercase__ : Union[str, Any] = activation_function lowercase__ : Dict = init_std lowercase__ : int = encoder_layerdrop lowercase__ : List[str] = decoder_layerdrop lowercase__ : str = use_cache lowercase__ : Dict = encoder_layers lowercase__ : int = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , forced_eos_token_id=lowercase_ , **lowercase_ , ) class snake_case_ ( __A ): @property def __UpperCamelCase ( self : Any ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: lowercase__ : str = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: lowercase__ : Tuple = {0: "batch"} lowercase__ : Any = {0: "batch", 1: "past_decoder_sequence + sequence"} else: lowercase__ : Dict = {0: "batch", 1: "decoder_sequence"} lowercase__ : Tuple = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(lowercase_ , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. 
lowercase__ : Optional[int] = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: lowercase__ , lowercase__ : Any = self.num_layers for i in range(lowercase_ ): lowercase__ : List[str] = {0: "batch", 2: "past_sequence + sequence"} lowercase__ : Any = {0: "batch", 2: "past_sequence + sequence"} else: lowercase__ : int = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property def __UpperCamelCase ( self : str ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: lowercase__ : Dict = super().outputs else: lowercase__ : List[str] = super(lowercase_ , self ).outputs if self.use_past: lowercase__ , lowercase__ : Optional[Any] = self.num_layers for i in range(lowercase_ ): lowercase__ : Dict = {0: "batch", 2: "past_sequence + sequence"} lowercase__ : List[Any] = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def __UpperCamelCase ( self : Tuple , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ) -> Mapping[str, Any]: lowercase__ : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) # Generate decoder inputs lowercase__ : str = seq_length if not self.use_past else 1 lowercase__ : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) lowercase__ : Union[str, Any] = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()} lowercase__ : Union[str, Any] = dict(**lowercase_ , 
**lowercase_ ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch lowercase__ , lowercase__ : Union[str, Any] = common_inputs["input_ids"].shape lowercase__ : Optional[int] = common_inputs["decoder_input_ids"].shape[1] lowercase__ , lowercase__ : List[str] = self.num_attention_heads lowercase__ : Dict = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) lowercase__ : List[str] = decoder_seq_length + 3 lowercase__ : Union[str, Any] = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) lowercase__ : Tuple = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(lowercase_ , lowercase_ )] , dim=1 ) lowercase__ : Union[str, Any] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered lowercase__ , lowercase__ : List[str] = self.num_layers lowercase__ : List[Any] = min(lowercase_ , lowercase_ ) lowercase__ : List[Any] = max(lowercase_ , lowercase_ ) - min_num_layers lowercase__ : int = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(lowercase_ ): common_inputs["past_key_values"].append( ( torch.zeros(lowercase_ ), torch.zeros(lowercase_ ), torch.zeros(lowercase_ ), torch.zeros(lowercase_ ), ) ) # TODO: test this. 
lowercase__ : str = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(lowercase_ , lowercase_ ): common_inputs["past_key_values"].append((torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) ) return common_inputs def __UpperCamelCase ( self : Optional[Any] , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ) -> Mapping[str, Any]: lowercase__ : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch lowercase__ , lowercase__ : str = common_inputs["input_ids"].shape # Not using the same length for past_key_values lowercase__ : Dict = seqlen + 2 lowercase__ , lowercase__ : List[str] = self.num_layers lowercase__ , lowercase__ : Optional[Any] = self.num_attention_heads lowercase__ : Optional[int] = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) lowercase__ : Optional[int] = common_inputs["attention_mask"].dtype lowercase__ : List[Any] = torch.cat( [common_inputs["attention_mask"], torch.ones(lowercase_ , lowercase_ , dtype=lowercase_ )] , dim=1 ) lowercase__ : Dict = [ (torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) for _ in range(lowercase_ ) ] return common_inputs def __UpperCamelCase ( self : List[Any] , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ) -> Mapping[str, Any]: # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX lowercase__ : List[Any] = compute_effective_axis_dimension( lowercase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowercase__ : Optional[Any] = tokenizer.num_special_tokens_to_add(lowercase_ ) lowercase__ : List[Any] = compute_effective_axis_dimension( lowercase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase_ ) # Generate dummy inputs according to compute batch and sequence lowercase__ : int = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size lowercase__ : Union[str, Any] = dict(tokenizer(lowercase_ , return_tensors=lowercase_ ) ) return common_inputs def __UpperCamelCase ( self : str , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ) -> Mapping[str, Any]: if self.task in ["default", "seq2seq-lm"]: lowercase__ : str = self._generate_dummy_inputs_for_default_and_seqaseq_lm( lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ ) elif self.task == "causal-lm": lowercase__ : List[str] = self._generate_dummy_inputs_for_causal_lm( lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ ) else: lowercase__ : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ ) return common_inputs def __UpperCamelCase ( self : Tuple , lowercase_ : Tuple , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : List[Any] ) -> Any: if self.task in ["default", "seq2seq-lm"]: lowercase__ : Dict = super()._flatten_past_key_values_(lowercase_ , lowercase_ , 
lowercase_ , lowercase_ ) else: lowercase__ : str = super(lowercase_ , self )._flatten_past_key_values_( lowercase_ , lowercase_ , lowercase_ , lowercase_ )
87
"""tests directory-specific settings - this file is run automatically by
pytest before any tests are run.

Bug fixes relative to the original: the src path was assigned to ``a`` but
read as ``git_repo_path``; all four pytest hooks were named
``__magic_name__`` so pytest would never discover them; and the final
monkeypatch assignments were bound to throwaway names instead of patching
``doctest``/``_pytest``.
"""
import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    """Register the custom markers used across the test suite."""
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    """Forward shared CLI options (e.g. --make-reports) to pytest."""
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """Emit the shared report files when --make-reports was requested."""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    """Treat "no tests collected" (exit status 5) as success."""
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        # With IGNORE_RESULT set, any output is accepted.
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
56
0
# Lazy-import module for GPT-NeoX.
#
# Bug fixes relative to the original: the import structure was never stored in
# ``_import_structure`` (each obfuscated assignment overwrote one throwaway
# name, leaving ``_import_structure`` undefined at the _LazyModule call);
# module-level ``List[Any]`` annotations referenced an unimported ``List``;
# and the _LazyModule instance was bound to a throwaway instead of replacing
# the module in ``sys.modules``.
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable

# Maps submodule name -> public names it exports; filled in conditionally below.
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox"] = [
        "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXForCausalLM",
        "GPTNeoXForQuestionAnswering",
        "GPTNeoXForSequenceClassification",
        "GPTNeoXForTokenClassification",
        "GPTNeoXLayer",
        "GPTNeoXModel",
        "GPTNeoXPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox import (
            GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
            GPTNeoXLayer,
            GPTNeoXModel,
            GPTNeoXPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
88
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging a : Dict = logging.get_logger(__name__) a : List[str] = { 'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json', # See all Marian models at https://huggingface.co/models?filter=marian } class a ( _lowerCamelCase ): snake_case_ = "marian" snake_case_ = ["past_key_values"] snake_case_ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self : List[Any] , lowercase_ : Optional[Any]=5_8101 , lowercase_ : Dict=None , lowercase_ : List[str]=1024 , lowercase_ : Optional[Any]=12 , lowercase_ : int=4096 , lowercase_ : Any=16 , lowercase_ : Optional[int]=12 , lowercase_ : str=4096 , lowercase_ : Union[str, Any]=16 , lowercase_ : Dict=0.0 , lowercase_ : Union[str, Any]=0.0 , lowercase_ : Optional[Any]=True , lowercase_ : Union[str, Any]=True , lowercase_ : int="gelu" , lowercase_ : Dict=1024 , lowercase_ : int=0.1 , lowercase_ : Tuple=0.0 , lowercase_ : Tuple=0.0 , lowercase_ : Tuple=0.02 , lowercase_ : int=5_8100 , lowercase_ : Optional[Any]=False , lowercase_ : Any=5_8100 , lowercase_ : Optional[int]=0 , lowercase_ : Dict=0 , lowercase_ : List[str]=True , **lowercase_ : Any , ): snake_case_ = vocab_size snake_case_ = decoder_vocab_size or vocab_size snake_case_ = max_position_embeddings snake_case_ = d_model snake_case_ = encoder_ffn_dim snake_case_ = encoder_layers snake_case_ = encoder_attention_heads snake_case_ = decoder_ffn_dim snake_case_ = decoder_layers snake_case_ = decoder_attention_heads snake_case_ = dropout snake_case_ = attention_dropout snake_case_ = activation_dropout snake_case_ = 
activation_function snake_case_ = init_std snake_case_ = encoder_layerdrop snake_case_ = decoder_layerdrop snake_case_ = use_cache snake_case_ = encoder_layers snake_case_ = scale_embedding # scale factor will be sqrt(d_model) if True snake_case_ = share_encoder_decoder_embeddings super().__init__( pad_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , forced_eos_token_id=lowercase_ , **lowercase_ , ) class a ( _lowerCamelCase ): @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs def A_ ( self : Union[str, Any] ): if self.task in ["default", "seq2seq-lm"]: snake_case_ = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: snake_case_ = {0: '''batch'''} snake_case_ = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: snake_case_ = {0: '''batch''', 1: '''decoder_sequence'''} snake_case_ = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(lowercase_ , direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. 
snake_case_ = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: snake_case_ ,snake_case_ = self.num_layers for i in range(lowercase_ ): snake_case_ = {0: '''batch''', 2: '''past_sequence + sequence'''} snake_case_ = {0: '''batch''', 2: '''past_sequence + sequence'''} else: snake_case_ = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def A_ ( self : Dict ): if self.task in ["default", "seq2seq-lm"]: snake_case_ = super().outputs else: snake_case_ = super(lowercase_ , self ).outputs if self.use_past: snake_case_ ,snake_case_ = self.num_layers for i in range(lowercase_ ): snake_case_ = {0: '''batch''', 2: '''past_sequence + sequence'''} snake_case_ = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def A_ ( self : Dict , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ): snake_case_ = self._generate_dummy_inputs_for_encoder_and_decoder( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) # Generate decoder inputs snake_case_ = seq_length if not self.use_past else 1 snake_case_ = self._generate_dummy_inputs_for_encoder_and_decoder( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) snake_case_ = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} snake_case_ = dict(**lowercase_ , **lowercase_ ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch 
# NOTE(review): this span is the tail of a seq2seq `generate_dummy_inputs` helper plus several
# sibling OnnxConfig methods. The leading fragment below belongs to a method whose `def` line is
# outside this chunk; everything is left byte-identical. Identifiers look machine-mangled: every
# assignment targets `snake_case_` while later expressions read names (`batch`,
# `num_encoder_attention_heads`, `encoder_shape`, ...) that are never bound, and every method is
# named `A_` (later defs shadow earlier ones). TODO: restore names from the upstream
# transformers `OnnxSeq2SeqConfigWithPast` before relying on this code.
installed.''' )
        else:
            import torch

            # Unpack batch / sequence dims of the encoder and decoder dummy inputs.
            snake_case_ ,snake_case_ = common_inputs['''input_ids'''].shape
            snake_case_ = common_inputs['''decoder_input_ids'''].shape[1]
            snake_case_ ,snake_case_ = self.num_attention_heads
            # Shape of one encoder past key/value tensor: (batch, heads, seq, head_dim).
            snake_case_ = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            snake_case_ = decoder_seq_length + 3
            snake_case_ = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            # Extend the decoder attention mask to cover the padded past length.
            snake_case_ = torch.cat(
                [common_inputs['''decoder_attention_mask'''], torch.ones(lowercase_ , lowercase_ )] , dim=1 )
            snake_case_ = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            snake_case_ ,snake_case_ = self.num_layers
            snake_case_ = min(lowercase_ , lowercase_ )
            snake_case_ = max(lowercase_ , lowercase_ ) - min_num_layers
            snake_case_ = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
            # Zero-filled past key/values for the layers both sides share
            # (4 tensors per layer: decoder self-attn k/v and cross-attn k/v).
            for _ in range(lowercase_ ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(lowercase_ ),
                        torch.zeros(lowercase_ ),
                        torch.zeros(lowercase_ ),
                        torch.zeros(lowercase_ ),
                    )
                )
            # TODO: test this.
            # Remaining layers exist only on the deeper side: 2 tensors per layer.
            snake_case_ = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
            for _ in range(lowercase_ , lowercase_ ):
                common_inputs["past_key_values"].append((torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) )
        return common_inputs

    def A_ ( self : Union[str, Any] , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ):
        # Dummy inputs for a decoder-only (causal LM) export; mirrors the seq2seq variant above
        # but pads only `attention_mask` and emits 2 zero tensors per layer.
        snake_case_ = self._generate_dummy_inputs_for_encoder_and_decoder(
            lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch

                snake_case_ ,snake_case_ = common_inputs['''input_ids'''].shape
                # Not using the same length for past_key_values
                snake_case_ = seqlen + 2
                snake_case_ ,snake_case_ = self.num_layers
                snake_case_ ,snake_case_ = self.num_attention_heads
                snake_case_ = (
                    batch,
                    num_encoder_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // num_encoder_attention_heads,
                )
                # Reuse the mask's dtype so the padded ones concatenate cleanly.
                snake_case_ = common_inputs['''attention_mask'''].dtype
                snake_case_ = torch.cat(
                    [common_inputs['''attention_mask'''], torch.ones(lowercase_ , lowercase_ , dtype=lowercase_ )] , dim=1 )
                snake_case_ = [
                    (torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) for _ in range(lowercase_ )
                ]
        return common_inputs

    def A_ ( self : List[str] , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ):
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        snake_case_ = compute_effective_axis_dimension(
            lowercase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        snake_case_ = tokenizer.num_special_tokens_to_add(lowercase_ )
        snake_case_ = compute_effective_axis_dimension(
            lowercase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase_ )
        # Generate dummy inputs according to compute batch and sequence
        snake_case_ = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
        snake_case_ = dict(tokenizer(lowercase_ , return_tensors=lowercase_ ) )
        return common_inputs

    def A_ ( self : Any , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ):
        # Dispatch on task: seq2seq-style dummy inputs vs causal-LM-style.
        if self.task in ["default", "seq2seq-lm"]:
            snake_case_ = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ )
        else:
            snake_case_ = self._generate_dummy_inputs_for_causal_lm(
                lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ )
        return common_inputs

    def A_ ( self : Dict , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : int , lowercase_ : List[str] ):
        # Flatten past_key_values with the seq2seq helper for encoder/decoder tasks,
        # otherwise fall back to the plain OnnxConfigWithPast implementation.
        if self.task in ["default", "seq2seq-lm"]:
            snake_case_ = super()._flatten_past_key_values_(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
        else:
            snake_case_ = super(lowercase_ , self )._flatten_past_key_values_(
                lowercase_ , lowercase_ , lowercase_ , lowercase_ )

    @property
    def A_ ( self : List[str] ):
        # Absolute tolerance used when validating the ONNX export against the reference model.
        return 1e-4
56
0
"""Convert an original CLAP checkpoint to the Hugging Face `ClapModel` format.

NOTE(review): this chunk arrived with machine-mangled identifiers (all three functions were
named `__lowerCamelCase`, the qkv split results were assigned to a throwaway local, and the
`__main__` block referenced an undefined `parser`/`args`). Names and dict assignments are
restored from the call sites and the obvious upstream conversion-script intent.
"""
import argparse
import re

import torch
from CLAP import create_model

from transformers import AutoFeatureExtractor, ClapConfig, ClapModel

# Maps original CLAP module names to their transformers counterparts.
KEYS_TO_MODIFY_MAPPING = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}

processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")


def init_clap(checkpoint_path, enable_fusion=False):
    """Build the original CLAP model (HTSAT-tiny audio tower + RoBERTa text tower).

    Returns the `(model, model_cfg)` pair produced by `CLAP.create_model`.
    """
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg


def rename_state_dict(state_dict):
    """Rename original CLAP state-dict keys to transformers names and split fused qkv weights."""
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list; CLAP groups (Linear, activation, dropout)
            # triples, hence the // 3.
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projection_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`: projection index 0 -> linear1, else linear2.
            transformers_projection_layer = 1 if projection_layer == 0 else 2
            key = key.replace(f"_projection.{projection_layer}.", f"_projection.linear{transformers_projection_layer}.")

        # FIX: original tested `"audio" and "qkv" in key`, which is just `"qkv" in key`
        # because the literal "audio" is always truthy.
        if "audio" in key and "qkv" in key:
            # split fused qkv into separate query, key and value projections
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict


def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    """Load the original checkpoint, rename its weights, and save a `ClapModel` + config."""
    clap_model, _ = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()

    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
89
"""Tests for the CycleDiffusion pipeline.

NOTE(review): this chunk arrived machine-mangled — every method was named `A_` (so neither
pytest nor unittest could collect the tests and `tearDown` never ran), all class attributes
shared the name `snake_case_`, the mixin bases were `_lowerCamelCase` (undefined), and digits
were garbled (`UNetaDConditionModel`, `torch.floataa`). Names are restored from the imported
symbols and the obvious upstream test-suite intent; numeric expectations are kept verbatim.
"""
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build a tiny UNet/VAE/CLIP stack so pipeline tests run on CPU in seconds."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1000,
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline kwargs; mps needs a CPU generator."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()


@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
56
0
import argparse
import json
from typing import List

from ltp import LTP

from transformers import BertTokenizer


# NOTE(review): every function in this chunk was machine-mangled to `lowerCamelCase_` (each def
# shadowing the previous one) while call sites used the real names (`_is_chinese_char`,
# `prepare_ref`, `main`, ...), so the script raised NameError on first use. Names below are
# restored from the call sites.


def _is_chinese_char(cp):
    """Return True if code point `cp` falls in a CJK Unified Ideographs Unicode block."""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True
    return False


def is_chinese(word: str):
    """Return 1 if every character of `word` is a CJK char, else 0."""
    for char in word:
        char_code = ord(char)
        if not _is_chinese_char(char_code):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    """Collect the multi-character, all-CJK words from a segmented sentence."""
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    """Prefix BERT sub-tokens that continue a known Chinese word with '##' (whole-word masking).

    Greedily matches the longest word from `chinese_word_set` at each position; mutates and
    returns `bert_tokens`.
    """
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            # Try the longest possible word first, down to length 2.
            length = min(end - start, max_word_len)
            for i in range(length, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    """For each line, return the positions of BERT tokens that continue an LTP-segmented word."""
    ltp_res = []
    # Batch in chunks of 100 lines — the LTP segmenter is much faster on batches.
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids


def main(args):
    """Read input lines, compute whole-word-mask reference ids, and write them as JSON lines."""
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
    )
    parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
    parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
    args = parser.parse_args()
    main(args)
90
"""BigBird model configuration.

NOTE(review): this chunk arrived with machine-mangled identifiers — the `__init__` signature
repeated the parameter name `lowercase_` (a SyntaxError), the body read names that were never
bound, and both classes were named `a` (the second shadowing the first). Parameter names and
attribute assignments are restored from the body's read sites, which match the upstream
BigBird configuration one-to-one (defaults kept verbatim).
"""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}


class BigBirdConfig(PretrainedConfig):
    """Configuration for a BigBird model; defaults match google/bigbird-roberta-base."""

    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        # Special token ids are forwarded so PretrainedConfig registers them.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        # "block_sparse" enables BigBird's sparse attention; "original_full" is dense.
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for ONNX export; multiple-choice adds a `choice` axis."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
56
0
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = ["image_processor", "tokenizer"] __UpperCamelCase = "CLIPImageProcessor" __UpperCamelCase = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__( self : List[Any] , lowercase_ : Dict=None , lowercase_ : List[str]=None , **lowercase_ : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , lowercase_ , ) SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.pop('''feature_extractor''') SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''') if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''') super().__init__(lowercase_ , lowercase_) def __call__( self : str , lowercase_ : int=None , lowercase_ : int=None , lowercase_ : Dict=None , **lowercase_ : Any): '''simple docstring''' if text is None and images is None: raise ValueError('''You have to specify either text or images. 
Both cannot be none.''') if text is not None: SCREAMING_SNAKE_CASE_ : Tuple = self.tokenizer(lowercase_ , return_tensors=lowercase_ , **lowercase_) if images is not None: SCREAMING_SNAKE_CASE_ : Dict = self.image_processor(lowercase_ , return_tensors=lowercase_ , **lowercase_) if text is not None and images is not None: SCREAMING_SNAKE_CASE_ : str = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**lowercase_) , tensor_type=lowercase_) def _SCREAMING_SNAKE_CASE ( self : str , *lowercase_ : int , **lowercase_ : Any): '''simple docstring''' return self.tokenizer.batch_decode(*lowercase_ , **lowercase_) def _SCREAMING_SNAKE_CASE ( self : List[str] , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[int]): '''simple docstring''' return self.tokenizer.decode(*lowercase_ , **lowercase_) @property def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = self.tokenizer.model_input_names SCREAMING_SNAKE_CASE_ : Any = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowercase_ , ) return self.image_processor_class @property def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowercase_ , ) return self.image_processor
91
"""Tests for `datasets` SQL I/O (SqlDatasetReader / SqlDatasetWriter).

NOTE(review): identifiers in this chunk were machine-mangled — every function was named
`__magic_name__` (shadowing its siblings, so pytest collected nothing), the import was
`sqlitea` instead of `sqlite3`, and the expected exception in the invalid-proc test was
stripped. Function and fixture names are restored from the obvious upstream test-suite intent;
fixture names (`sqlite_path`, `set_sqlalchemy_silence_uber_warning`) should be confirmed
against the suite's conftest.py.
"""
import contextlib
import os
import sqlite3

import pytest

from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy


def _check_sql_dataset(dataset, expected_features):
    """Shared assertions: a 4x3 Dataset with the expected column names and dtypes."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    # keep_in_memory=True must allocate Arrow memory; False must not.
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)


def iter_sql_file(sqlite_path):
    """Yield every row of the `dataset` table in the given SQLite file."""
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    # num_proc must be >= 1; the writer rejects 0. (Exception type was mangled away in the
    # original — ValueError restored from the writer's contract; confirm against datasets.)
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
56
0
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional

import torch

from ..utils import add_start_docstrings, logging


# Fix: the original bound the logger and the docstring constant to the same
# mangled name, leaving `logger` (read below) undefined.
logger = logging.get_logger(__name__)


STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
            or scores for each vocabulary token after SoftMax.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional stopping criteria specific kwargs.

    Return:
        `bool`. `False` indicates we should continue, `True` indicates we should stop.
"""


class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria applied during generation.

    Fixes vs. original: distinct parameter names (the mangled `_A, _A, **_A`
    signature is a SyntaxError) and one distinct class name per class (all five
    classes previously shared the name `a__`, each overwriting the last).
    """

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    """Stop once the generated sequence reaches ``max_length`` tokens."""

    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        # Optional model limit used only to emit a warning, never to stop.
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    """Deprecated: stop after ``max_new_tokens`` tokens past ``start_length``."""

    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    """Stop once generation has run longer than ``max_time`` seconds."""

    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    """A list of criteria; stops when any one of them fires."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        """Return the configured max length, if any length-based criterion is present."""
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    """Return a copy of ``stopping_criteria`` guaranteed to enforce ``max_length``.

    Warns if the list already carries a different max length; appends a
    `MaxLengthCriteria` if it carries none.
    """
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
92
'''simple docstring''' from collections import defaultdict def __magic_name__ ( __UpperCAmelCase ) -> int: '''simple docstring''' snake_case_ = 1 snake_case_ = True for v in tree[start]: if v not in visited: ret += dfs(__UpperCAmelCase ) if ret % 2 == 0: cuts.append(__UpperCAmelCase ) return ret def __magic_name__ ( ) -> Union[str, Any]: '''simple docstring''' dfs(1 ) if __name__ == "__main__": a ,a : Dict = 10, 9 a : Dict = defaultdict(list) a : dict[int, bool] = {} a : list[int] = [] a : Tuple = 0 a : str = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)] for u, v in edges: tree[u].append(v) tree[v].append(u) even_tree() print(len(cuts) - 1)
56
0
"""Integration tests for the `datasets` inspection helpers."""
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)


# Fix: the original bound the marker to a throwaway name; pytest only honours
# the module-level `pytestmark` hook.
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    """`inspect_dataset` copies the loading script into the target directory."""
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    """`inspect_metric` copies the metric script into the target directory."""
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    # paws has several configs, so omitting config_name must fail.
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
93
'''simple docstring''' import math from collections.abc import Callable def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> float: '''simple docstring''' snake_case_ = xa snake_case_ = xa while True: if x_n == x_na or function(__UpperCAmelCase ) == function(__UpperCAmelCase ): raise ZeroDivisionError('''float division by zero, could not find root''' ) snake_case_ = x_na - ( function(__UpperCAmelCase ) / ((function(__UpperCAmelCase ) - function(__UpperCAmelCase )) / (x_na - x_n)) ) if abs(x_na - x_na ) < 10**-5: return x_na snake_case_ = x_na snake_case_ = x_na def __magic_name__ ( __UpperCAmelCase ) -> float: '''simple docstring''' return math.pow(__UpperCAmelCase, 3 ) - (2 * x) - 5 if __name__ == "__main__": print(intersection(f, 3, 3.5))
56
0
import numpy as np

# 5x5 Polybius square; 'j' shares a cell with 'i'.
SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    """Bifid cipher over the 5x5 Polybius square above.

    Fixes vs. original: the four methods all shared one mangled name (each
    definition overwrote the previous), ``__init__`` read an undefined
    parameter, the coordinate-array assignment targets were lost, and
    ``decode`` discarded the result of ``str.replace``.
    """

    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-based (row, column) coordinates of ``letter``."""
        index1, index2 = np.where(letter == self.SQUARE)
        return np.concatenate([index1 + 1, index2 + 1])

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at 1-based (row, column) = (index1, index2)."""
        return self.SQUARE[index1 - 1, index2 - 1]

    def encode(self, message: str) -> str:
        """Encode ``message`` (lowercased, spaces dropped, 'j' folded to 'i').

        Writes each letter's coordinates into a 2 x n array (rows over
        columns), flattens it row-major, and reads the digits back pairwise.
        """
        message = message.lower().replace(" ", "").replace("j", "i")
        first_step = np.empty((2, len(message)))
        for i, letter in enumerate(message):
            numbers = self.letter_to_numbers(letter)
            first_step[0, i] = numbers[0]
            first_step[1, i] = numbers[1]
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for i in range(len(message)):
            index1 = int(second_step[i * 2])
            index2 = int(second_step[i * 2 + 1])
            encoded_message += self.numbers_to_letter(index1, index2)
        return encoded_message

    def decode(self, message: str) -> str:
        """Invert :meth:`encode`.

        Each ciphertext letter contributes its coordinate pair back into the
        flat digit sequence; reshaping to 2 x n recovers the plaintext rows
        and columns.
        """
        message = message.lower().replace(" ", "")
        first_step = np.empty(2 * len(message))
        for i, letter in enumerate(message):
            numbers = self.letter_to_numbers(letter)
            first_step[i * 2] = numbers[0]
            first_step[i * 2 + 1] = numbers[1]
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for i in range(len(message)):
            index1 = int(second_step[0, i])
            index2 = int(second_step[1, i])
            decoded_message += self.numbers_to_letter(index1, index2)
        return decoded_message
94
"""Convert original DPT checkpoints (intel-isl/DPT) to the HF format."""
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image

from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    """Build a `DPTConfig` and the expected output shape from the checkpoint URL.

    NOTE(review): assumes the URL contains "large" and/or "ade"; otherwise
    `expected_shape` is never bound — TODO confirm supported URLs.
    """
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape


def remove_ignore_keys_(state_dict):
    """Drop keys with no counterpart in the HF implementation (in place)."""
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(name):
    """Map an original DPT parameter name onto the HF DPT naming scheme."""
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    return name


def read_in_q_k_v(state_dict, config):
    """Split each fused timm ``qkv`` projection into separate q/k/v tensors."""
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def prepare_img():
    """Download the standard COCO cats image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    return Image.open(requests.get(url, stream=True).raw)


@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    """Convert, sanity-check, save and optionally push a DPT checkpoint.

    Fixes vs. original: all six functions shared one mangled name and the
    mangled duplicate parameter names were a SyntaxError; the `__main__`
    block below calls `convert_dpt_checkpoint`, which fixes the intended name.
    """
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        type=str,
        help="URL of the original DPT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    parser.add_argument(
        "--model_name",
        default="dpt-large",
        type=str,
        help="Name of the model, in case you're pushing to the hub.",
    )

    args = parser.parse_args()
    convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
import warnings from ...utils import logging from .image_processing_segformer import SegformerImageProcessor UpperCAmelCase : Optional[int] = logging.get_logger(__name__) class __lowerCAmelCase ( UpperCamelCase__): def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> None: '''simple docstring''' warnings.warn( "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use SegformerImageProcessor instead." , lowerCAmelCase__ , ) super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
95
'''simple docstring''' import re def __magic_name__ ( __UpperCAmelCase ) -> bool: '''simple docstring''' snake_case_ = re.compile( r'''^(?:0|94|\+94|0{2}94)''' r'''7(0|1|2|4|5|6|7|8)''' r'''(-| |)''' r'''\d{7}$''' ) return bool(re.search(__UpperCAmelCase, __UpperCAmelCase ) ) if __name__ == "__main__": a : Any = '0094702343221' print(is_sri_lankan_phone_number(phone))
56
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase__ = logging.get_logger(__name__) lowercase__ = { """google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""", """google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""", # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """mobilenet_v1""" def __init__( self , lowercase=3 , lowercase=224 , lowercase=1.0 , lowercase=8 , lowercase="relu6" , lowercase=True , lowercase=0.9_99 , lowercase=0.02 , lowercase=0.0_01 , **lowercase , ): super().__init__(**lowercase ) if depth_multiplier <= 0: raise ValueError('depth_multiplier must be greater than zero.' ) _lowerCamelCase : Optional[int] = num_channels _lowerCamelCase : Any = image_size _lowerCamelCase : str = depth_multiplier _lowerCamelCase : Dict = min_depth _lowerCamelCase : List[str] = hidden_act _lowerCamelCase : Union[str, Any] = tf_padding _lowerCamelCase : str = classifier_dropout_prob _lowerCamelCase : Tuple = initializer_range _lowerCamelCase : int = layer_norm_eps class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = version.parse("""1.11""" ) @property def A_ ( self ): return OrderedDict([('pixel_values', {0: 'batch'})] ) @property def A_ ( self ): if self.task == "image-classification": return OrderedDict([('logits', {0: 'batch'})] ) else: return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] ) @property def A_ ( self ): return 1E-4
96
'''simple docstring''' import re from filelock import FileLock try: import nltk a : Union[str, Any] = True except (ImportError, ModuleNotFoundError): a : Any = False if NLTK_AVAILABLE: with FileLock('.lock') as lock: nltk.download('punkt', quiet=True) def __magic_name__ ( __UpperCAmelCase ) -> str: '''simple docstring''' re.sub('''<n>''', '''''', __UpperCAmelCase ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(__UpperCAmelCase ) )
56
0
'''simple docstring''' from __future__ import annotations from decimal import Decimal from numpy import array def a ( __a ) -> list[list[float]]: '''simple docstring''' UpperCamelCase__ :List[str] = Decimal # Check if the provided matrix has 2 rows and 2 columns # since this implementation only works for 2x2 matrices if len(__a ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2: # Calculate the determinant of the matrix UpperCamelCase__ :Optional[int] = float( d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) ) if determinant == 0: raise ValueError('''This matrix has no inverse.''' ) # Creates a copy of the matrix with swapped positions of the elements UpperCamelCase__ :List[Any] = [[0.0, 0.0], [0.0, 0.0]] UpperCamelCase__ , UpperCamelCase__ :int = matrix[1][1], matrix[0][0] UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = -matrix[1][0], -matrix[0][1] # Calculate the inverse of the matrix return [ [(float(d(__a ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix ] elif ( len(__a ) == 3 and len(matrix[0] ) == 3 and len(matrix[1] ) == 3 and len(matrix[2] ) == 3 ): # Calculate the determinant of the matrix using Sarrus rule UpperCamelCase__ :Tuple = float( ( (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] )) + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] )) + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] )) ) - ( (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] )) + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] )) + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] )) ) ) if determinant == 0: raise ValueError('''This matrix has no inverse.''' ) # Creating cofactor matrix UpperCamelCase__ :Any = [ [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], ] UpperCamelCase__ :int = (d(matrix[1][1] ) * d(matrix[2][2] )) - ( d(matrix[1][2] ) * d(matrix[2][1] ) ) UpperCamelCase__ :Union[str, Any] = -( (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] 
) * d(matrix[2][0] )) ) UpperCamelCase__ :Tuple = (d(matrix[1][0] ) * d(matrix[2][1] )) - ( d(matrix[1][1] ) * d(matrix[2][0] ) ) UpperCamelCase__ :Any = -( (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] )) ) UpperCamelCase__ :Dict = (d(matrix[0][0] ) * d(matrix[2][2] )) - ( d(matrix[0][2] ) * d(matrix[2][0] ) ) UpperCamelCase__ :Tuple = -( (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] )) ) UpperCamelCase__ :List[Any] = (d(matrix[0][1] ) * d(matrix[1][2] )) - ( d(matrix[0][2] ) * d(matrix[1][1] ) ) UpperCamelCase__ :str = -( (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] )) ) UpperCamelCase__ :Tuple = (d(matrix[0][0] ) * d(matrix[1][1] )) - ( d(matrix[0][1] ) * d(matrix[1][0] ) ) # Transpose the cofactor matrix (Adjoint matrix) UpperCamelCase__ :Optional[int] = array(__a ) for i in range(3 ): for j in range(3 ): UpperCamelCase__ :Optional[int] = cofactor_matrix[j][i] # Inverse of the matrix using the formula (1/determinant) * adjoint matrix UpperCamelCase__ :str = array(__a ) for i in range(3 ): for j in range(3 ): inverse_matrix[i][j] /= d(__a ) # Calculate the inverse of the matrix return [[float(d(__a ) ) or 0.0 for n in row] for row in inverse_matrix] raise ValueError('''Please provide a matrix of size 2x2 or 3x3.''' )
97
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) a : Tuple = { 'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Optional[Any] = ['LlamaTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : str = ['LlamaTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Optional[Any] = [ 'LlamaForCausalLM', 'LlamaModel', 'LlamaPreTrainedModel', 'LlamaForSequenceClassification', ] if TYPE_CHECKING: from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama import LlamaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama_fast import LlamaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel else: import sys a : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
56
0
"""simple docstring""" from typing import Dict, Iterable, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract lowerCAmelCase__ : Union[str, Any] = logging.get_logger(__name__) def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ): return [ int(1_0_0_0 * (box[0] / width) ), int(1_0_0_0 * (box[1] / height) ), int(1_0_0_0 * (box[2] / width) ), int(1_0_0_0 * (box[3] / height) ), ] def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ): UpperCAmelCase__ = to_pil_image(lowerCamelCase ) UpperCAmelCase__ , UpperCAmelCase__ = pil_image.size UpperCAmelCase__ = pytesseract.image_to_data(lowerCamelCase , lang=lowerCamelCase , output_type='dict' , config=lowerCamelCase ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = data['text'], data['left'], data['top'], data['width'], data['height'] # filter empty words and corresponding coordinates UpperCAmelCase__ = [idx for idx, word in enumerate(lowerCamelCase ) if not word.strip()] UpperCAmelCase__ = [word for idx, word in enumerate(lowerCamelCase ) if idx not in irrelevant_indices] UpperCAmelCase__ = [coord for idx, coord in enumerate(lowerCamelCase ) if idx not in irrelevant_indices] UpperCAmelCase__ = [coord for idx, coord in enumerate(lowerCamelCase ) if idx not in irrelevant_indices] UpperCAmelCase__ = [coord for idx, coord in enumerate(lowerCamelCase ) if idx not in irrelevant_indices] UpperCAmelCase__ = [coord for idx, 
coord in enumerate(lowerCamelCase ) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format UpperCAmelCase__ = [] for x, y, w, h in zip(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): UpperCAmelCase__ = [x, y, x + w, y + h] actual_boxes.append(lowerCamelCase ) # finally, normalize the bounding boxes UpperCAmelCase__ = [] for box in actual_boxes: normalized_boxes.append(normalize_box(lowerCamelCase , lowerCamelCase , lowerCamelCase ) ) assert len(lowerCamelCase ) == len(lowerCamelCase ), "Not as many words as there are bounding boxes" return words, normalized_boxes class snake_case ( __UpperCAmelCase ): """simple docstring""" snake_case__ = ["pixel_values"] def __init__( self : int ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : float = 1 / 255 ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Union[float, Iterable[float]] = None ,lowerCamelCase__ : Union[float, Iterable[float]] = None ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Optional[str] = None ,lowerCamelCase__ : Optional[str] = "" ,**lowerCamelCase__ : Any ,): super().__init__(**lowerCamelCase__ ) UpperCAmelCase__ = size if size is not None else {'height': 224, 'width': 224} UpperCAmelCase__ = get_size_dict(lowerCamelCase__ ) UpperCAmelCase__ = do_resize UpperCAmelCase__ = size UpperCAmelCase__ = resample UpperCAmelCase__ = do_rescale UpperCAmelCase__ = rescale_value UpperCAmelCase__ = do_normalize UpperCAmelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD UpperCAmelCase__ = apply_ocr UpperCAmelCase__ = ocr_lang UpperCAmelCase__ = tesseract_config def __lowerCAmelCase ( self : List[str] ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Dict[str, int] ,lowerCamelCase__ : 
PILImageResampling = PILImageResampling.BILINEAR ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : str ,): UpperCAmelCase__ = get_size_dict(lowerCamelCase__ ) if "height" not in size or "width" not in size: raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' ) UpperCAmelCase__ = (size['height'], size['width']) return resize(lowerCamelCase__ ,size=lowerCamelCase__ ,resample=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ ) def __lowerCAmelCase ( self : str ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Union[int, float] ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : Dict ,): return rescale(lowerCamelCase__ ,scale=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ ) def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Union[float, Iterable[float]] ,lowerCamelCase__ : Union[float, Iterable[float]] ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : Optional[int] ,): return normalize(lowerCamelCase__ ,mean=lowerCamelCase__ ,std=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ ) def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : ImageInput ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : Union[str, Any]=None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : float = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : Union[float, Iterable[float]] = None ,lowerCamelCase__ : Union[float, Iterable[float]] = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : Optional[str] = None ,lowerCamelCase__ : Optional[str] = None ,lowerCamelCase__ : Optional[Union[str, TensorType]] = None ,lowerCamelCase__ : ChannelDimension = ChannelDimension.FIRST ,**lowerCamelCase__ : str ,): UpperCAmelCase__ = do_resize if do_resize is not None else self.do_resize 
UpperCAmelCase__ = size if size is not None else self.size UpperCAmelCase__ = get_size_dict(lowerCamelCase__ ) UpperCAmelCase__ = resample if resample is not None else self.resample UpperCAmelCase__ = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase__ = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase__ = image_mean if image_mean is not None else self.image_mean UpperCAmelCase__ = image_std if image_std is not None else self.image_std UpperCAmelCase__ = apply_ocr if apply_ocr is not None else self.apply_ocr UpperCAmelCase__ = ocr_lang if ocr_lang is not None else self.ocr_lang UpperCAmelCase__ = tesseract_config if tesseract_config is not None else self.tesseract_config UpperCAmelCase__ = make_list_of_images(lowerCamelCase__ ) if not valid_images(lowerCamelCase__ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('If do_normalize is True, image_mean and image_std must be specified.' ) # All transformations expect numpy arrays. 
UpperCAmelCase__ = [to_numpy_array(lowerCamelCase__ ) for image in images] # Tesseract OCR to get words + normalized bounding boxes if apply_ocr: requires_backends(self ,'pytesseract' ) UpperCAmelCase__ = [] UpperCAmelCase__ = [] for image in images: UpperCAmelCase__ , UpperCAmelCase__ = apply_tesseract(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ) words_batch.append(lowerCamelCase__ ) boxes_batch.append(lowerCamelCase__ ) if do_resize: UpperCAmelCase__ = [self.resize(image=lowerCamelCase__ ,size=lowerCamelCase__ ,resample=lowerCamelCase__ ) for image in images] if do_rescale: UpperCAmelCase__ = [self.rescale(image=lowerCamelCase__ ,scale=lowerCamelCase__ ) for image in images] if do_normalize: UpperCAmelCase__ = [self.normalize(image=lowerCamelCase__ ,mean=lowerCamelCase__ ,std=lowerCamelCase__ ) for image in images] UpperCAmelCase__ = [to_channel_dimension_format(lowerCamelCase__ ,lowerCamelCase__ ) for image in images] UpperCAmelCase__ = BatchFeature(data={'pixel_values': images} ,tensor_type=lowerCamelCase__ ) if apply_ocr: UpperCAmelCase__ = words_batch UpperCAmelCase__ = boxes_batch return data
98
"""TensorFlow optimization utilities: warmup schedule, AdamW, gradient accumulation.

NOTE(review): the original chunk was machine-obfuscated — three classes all
named `a` (so only the last definition survived), duplicate keyword arguments
`beta_a=..., beta_a=...` (a SyntaxError), and tuple-unpack targets collapsed
to a single name. Reconstructed with real identifiers; behavior follows the
visible statement sequence.
"""
import re
from typing import Callable, List, Optional, Union

import tensorflow as tf


try:
    from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
    from tensorflow.keras.optimizers import Adam


class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Linear warmup for `warmup_steps`, then hand off to `decay_schedule_fn`."""

    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }


def create_optimizer(
    init_lr,
    num_train_steps,
    num_warmup_steps,
    min_lr_ratio=0.0,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    adam_clipnorm=None,
    adam_global_clipnorm=None,
    weight_decay_rate=0.0,
    power=1.0,
    include_in_weight_decay=None,
):
    """Create an optimizer (AdamWeightDecay when `weight_decay_rate > 0`, plain
    Adam otherwise) with a polynomial-decay schedule plus optional warmup.

    Returns:
        (optimizer, lr_schedule) — the schedule is returned separately so the
        LR can be tracked independently of the optimizer.
    """
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule


class AdamWeightDecay(Adam):
    """Adam with decoupled L2 weight decay (AdamW-style).

    Weight decay is applied directly to the variable (not through the gradient),
    gated per-variable by `include_in_weight_decay` / `exclude_from_weight_decay`
    regex lists.
    """

    def __init__(
        self,
        learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001,
        beta_1: float = 0.9,
        beta_2: float = 0.999,
        epsilon: float = 1e-7,
        amsgrad: bool = False,
        weight_decay_rate: float = 0.0,
        include_in_weight_decay: Optional[List[str]] = None,
        exclude_from_weight_decay: Optional[List[str]] = None,
        name: str = "AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        # Decoupled decay: subtract lr * wd * var before the Adam update runs.
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True


class GradientAccumulator:
    """Accumulates gradients across steps so large effective batch sizes fit in memory.

    Call the instance with a list of gradients to accumulate them; read
    `.gradients` to fetch the sums and `.reset()` to zero them.
    """

    def __init__(self):
        """Initializes the accumulator."""
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps (lazily creates the counter variable)."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
56
0
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    """Estimate pi by sampling random points in the square [-1, 1] x [-1, 1].

    The fraction of points landing inside the unit circle approaches pi/4.
    Prints the estimate, the reference value, and the absolute error.

    Args:
        iterations: number of random points to sample (more -> better estimate).
    """

    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of `function_to_integrate` over
    [min_value, max_value]: mean of f at uniform samples times the interval width.

    Returns:
        The estimated area under the curve.
    """
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    """Sanity-check the estimator against y = x, whose exact integral is
    (max^2 - min^2) / 2. Prints estimate, exact value, and error.
    """

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi by integrating the quarter-circle y = sqrt(4 - x^2) on [0, 2],
    whose exact area is pi. Prints estimate, reference value, and error.
    """

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
99
"""Unit and slow integration tests for diffusers' AutoencoderKL.

NOTE(review): the original chunk was machine-obfuscated — every test method in
each class was named `A_`, so Python silently kept only the last definition
and the suite effectively ran a single test; both classes were named `a`, so
the fast test class was shadowed by the slow one. Reconstructed with unique
`test_*` method names; bodies follow the visible statement sequences and the
expected tensors are copied verbatim.
"""
import gc
import unittest

import torch
from parameterized import parameterized

from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


enable_full_determinism()


class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))

    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained(
            "fusing/autoencoder-kl-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))


@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()
        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)

    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)

    @parameterized.expand([(13,), (16,), (27,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)

    @parameterized.expand([(13,), (16,), (37,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
56
0
"""simple docstring""" from __future__ import annotations from math import gcd def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ = 2 , UpperCamelCase_ = 1 , UpperCamelCase_ = 3 , ): # A value less than 2 can cause an infinite loop in the algorithm. if num < 2: raise ValueError("""The input value cannot be less than 2""" ) # Because of the relationship between ``f(f(x))`` and ``f(x)``, this # algorithm struggles to find factors that are divisible by two. # As a workaround, we specifically check for two and even inputs. # See: https://math.stackexchange.com/a/2856214/165820 if num > 2 and num % 2 == 0: return 2 # Pollard's Rho algorithm requires a function that returns pseudorandom # values between 0 <= X < ``num``. It doesn't need to be random in the # sense that the output value is cryptographically secure or difficult # to calculate, it only needs to be random in the sense that all output # values should be equally likely to appear. # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num`` # However, the success of Pollard's algorithm isn't guaranteed and is # determined in part by the initial seed and the chosen random function. # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num`` # where ``C`` is a value that we can modify between each attempt. def rand_fn(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> int: return (pow(UpperCamelCase_ , 2 ) + step) % modulus for _ in range(UpperCamelCase_ ): # These track the position within the cycle detection logic. __SCREAMING_SNAKE_CASE = seed __SCREAMING_SNAKE_CASE = seed while True: # At each iteration, the tortoise moves one step and the hare moves two. 
__SCREAMING_SNAKE_CASE = rand_fn(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = rand_fn(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = rand_fn(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # At some point both the tortoise and the hare will enter a cycle whose # length ``p`` is a divisor of ``num``. Once in that cycle, at some point # the tortoise and hare will end up on the same value modulo ``p``. # We can detect when this happens because the position difference between # the tortoise and the hare will share a common divisor with ``num``. __SCREAMING_SNAKE_CASE = gcd(hare - tortoise , UpperCamelCase_ ) if divisor == 1: # No common divisor yet, just keep searching. continue else: # We found a common divisor! if divisor == num: # Unfortunately, the divisor is ``num`` itself and is useless. break else: # The divisor is a nontrivial factor of ``num``! return divisor # If we made it here, then this attempt failed. # We need to pick a new starting seed for the tortoise and hare # in addition to a new step value for the random function. # To keep this example implementation deterministic, the # new values will be generated based on currently available # values instead of using something like ``random.randint``. # We can use the hare's position as the new seed. # This is actually what Richard Brent's the "optimized" variant does. __SCREAMING_SNAKE_CASE = hare # The new step value for the random function can just be incremented. # At first the results will be similar to what the old function would # have produced, but the value will quickly diverge after a bit. step += 1 # We haven't found a divisor within the requested number of attempts. # We were unlucky or ``num`` itself is actually prime. 
return None if __name__ == "__main__": import argparse __magic_name__ = argparse.ArgumentParser() parser.add_argument( "num", type=int, help="The value to find a divisor of", ) parser.add_argument( "--attempts", type=int, default=3, help="The number of attempts before giving up", ) __magic_name__ = parser.parse_args() __magic_name__ = pollard_rho(args.num, attempts=args.attempts) if divisor is None: print(F"""{args.num} is probably prime""") else: __magic_name__ = args.num // divisor print(F"""{args.num} = {divisor} * {quotient}""")
100
'''simple docstring'''
from typing import Optional, Tuple, Union

import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict

from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
    FlaxCrossAttnDownBlockaD,
    FlaxCrossAttnUpBlockaD,
    FlaxDownBlockaD,
    FlaxUNetMidBlockaDCrossAttn,
    FlaxUpBlockaD,
)


# NOTE(review): an automated renaming pass destroyed this file's identifiers:
# both classes are named `a`, their base `_lowerCamelCase` is undefined, every
# field/assignment is `snake_case_` while later lines read the original names
# (e.g. `down_blocks`, `sample`, `timesteps`), and `__call__` repeats the
# parameter name `lowercase_` (duplicate parameter names are a SyntaxError).
# Tokens are preserved as-is; names must be restored before this can run.
@flax.struct.dataclass
class a(_lowerCamelCase):
    # Output container for the UNet forward pass; the single field is the
    # predicted sample (presumably `sample: jnp.ndarray` before obfuscation).
    snake_case_ = 42


@flax_register_to_config
class a(nn.Module, _lowerCamelCase, _lowerCamelCase):
    """Flax conditional 2D UNet: conv_in -> time embedding -> down blocks ->
    cross-attention mid block -> up blocks (with skip residuals) -> conv_out."""

    # Config fields (names lost to obfuscation): sample_size, in_channels,
    # out_channels, down/up block types, block_out_channels, layers_per_block,
    # attention_head_dim, cross_attention_dim, dropout, dtype, etc. — TODO
    # confirm ordering against the upstream FlaxUNet2DConditionModel.
    snake_case_ = 32
    snake_case_ = 4
    snake_case_ = 4
    snake_case_ = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    snake_case_ = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    snake_case_ = False
    snake_case_ = (320, 640, 1_280, 1_280)
    snake_case_ = 2
    snake_case_ = 8
    snake_case_ = None
    snake_case_ = 1_280
    snake_case_ = 0.0
    snake_case_ = False
    snake_case_ = jnp.floataa
    snake_case_ = True
    snake_case_ = 0
    snake_case_ = False

    def A_(self: Optional[int], lowercase_: jax.random.KeyArray):
        """Initialize parameters by running `init` on dummy sample/timestep/context tensors."""
        # init input tensors
        snake_case_ = (1, self.in_channels, self.sample_size, self.sample_size)
        snake_case_ = jnp.zeros(lowercase_, dtype=jnp.floataa)
        snake_case_ = jnp.ones((1,), dtype=jnp.intaa)
        snake_case_ = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.floataa)
        snake_case_, snake_case_ = jax.random.split(lowercase_)
        snake_case_ = {'''params''': params_rng, '''dropout''': dropout_rng}
        return self.init(lowercase_, lowercase_, lowercase_, lowercase_)["params"]

    def A_(self: List[str]):
        """Module setup: build input conv, time embedding, down/mid/up blocks, output head."""
        snake_case_ = self.block_out_channels
        # Time embedding dimension is 4x the first block's channel count.
        snake_case_ = block_out_channels[0] * 4
        if self.num_attention_heads is not None:
            raise ValueError(
                '''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.'''
            )
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        snake_case_ = self.num_attention_heads or self.attention_head_dim

        # input
        snake_case_ = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        snake_case_ = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        snake_case_ = FlaxTimestepEmbedding(lowercase_, dtype=self.dtype)

        # Broadcast scalar config flags to one entry per down block.
        snake_case_ = self.only_cross_attention
        if isinstance(lowercase_, lowercase_):
            snake_case_ = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(lowercase_, lowercase_):
            snake_case_ = (num_attention_heads,) * len(self.down_block_types)

        # down
        snake_case_ = []
        snake_case_ = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            snake_case_ = output_channel
            snake_case_ = block_out_channels[i]
            # The final block skips downsampling.
            snake_case_ = i == len(lowercase_) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                snake_case_ = FlaxCrossAttnDownBlockaD(
                    in_channels=lowercase_,
                    out_channels=lowercase_,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                snake_case_ = FlaxDownBlockaD(
                    in_channels=lowercase_,
                    out_channels=lowercase_,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )
            down_blocks.append(lowercase_)
        snake_case_ = down_blocks

        # mid
        snake_case_ = FlaxUNetMidBlockaDCrossAttn(
            in_channels=block_out_channels[-1],
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            use_memory_efficient_attention=self.use_memory_efficient_attention,
            dtype=self.dtype,
        )

        # up — mirrors the down path, so channel lists are reversed.
        snake_case_ = []
        snake_case_ = list(reversed(lowercase_))
        snake_case_ = list(reversed(lowercase_))
        snake_case_ = list(reversed(lowercase_))
        snake_case_ = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            snake_case_ = output_channel
            snake_case_ = reversed_block_out_channels[i]
            snake_case_ = reversed_block_out_channels[min(i + 1, len(lowercase_) - 1)]
            snake_case_ = i == len(lowercase_) - 1
            if up_block_type == "CrossAttnUpBlock2D":
                snake_case_ = FlaxCrossAttnUpBlockaD(
                    in_channels=lowercase_,
                    out_channels=lowercase_,
                    prev_output_channel=lowercase_,
                    num_layers=self.layers_per_block + 1,
                    num_attention_heads=reversed_num_attention_heads[i],
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                snake_case_ = FlaxUpBlockaD(
                    in_channels=lowercase_,
                    out_channels=lowercase_,
                    prev_output_channel=lowercase_,
                    num_layers=self.layers_per_block + 1,
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    dtype=self.dtype,
                )
            up_blocks.append(lowercase_)
            snake_case_ = output_channel
        snake_case_ = up_blocks

        # out
        snake_case_ = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        snake_case_ = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(
        self: Union[str, Any],
        lowercase_: Union[str, Any],
        lowercase_: int,
        lowercase_: Any,
        lowercase_: int = None,
        lowercase_: Any = None,
        lowercase_: bool = True,
        lowercase_: bool = False,
    ):
        """Forward pass.

        Parameters were presumably (sample, timesteps, encoder_hidden_states,
        down_block_additional_residuals, mid_block_additional_residual,
        return_dict, train) before obfuscation — TODO confirm upstream.
        """
        # 1. time — normalize the timestep argument to a 1-D array.
        if not isinstance(lowercase_, jnp.ndarray):
            snake_case_ = jnp.array([timesteps], dtype=jnp.intaa)
        elif isinstance(lowercase_, jnp.ndarray) and len(timesteps.shape) == 0:
            snake_case_ = timesteps.astype(dtype=jnp.floataa)
            snake_case_ = jnp.expand_dims(lowercase_, 0)
        snake_case_ = self.time_proj(lowercase_)
        snake_case_ = self.time_embedding(lowercase_)

        # 2. pre-process — Flax convs are channels-last, so NCHW -> NHWC.
        snake_case_ = jnp.transpose(lowercase_, (0, 2, 3, 1))
        snake_case_ = self.conv_in(lowercase_)

        # 3. down — collect residuals for the skip connections.
        snake_case_ = (sample,)
        for down_block in self.down_blocks:
            if isinstance(lowercase_, lowercase_):
                snake_case_, snake_case_ = down_block(lowercase_, lowercase_, lowercase_, deterministic=not train)
            else:
                snake_case_, snake_case_ = down_block(lowercase_, lowercase_, deterministic=not train)
            down_block_res_samples += res_samples
        # ControlNet-style additional residuals are added element-wise.
        if down_block_additional_residuals is not None:
            snake_case_ = ()
            for down_block_res_sample, down_block_additional_residual in zip(lowercase_, lowercase_):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)
            snake_case_ = new_down_block_res_samples

        # 4. mid
        snake_case_ = self.mid_block(lowercase_, lowercase_, lowercase_, deterministic=not train)
        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up — consume the residual stack in reverse, layers_per_block+1 at a time.
        for up_block in self.up_blocks:
            snake_case_ = down_block_res_samples[-(self.layers_per_block + 1):]
            snake_case_ = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(lowercase_, lowercase_):
                snake_case_ = up_block(
                    lowercase_,
                    temb=lowercase_,
                    encoder_hidden_states=lowercase_,
                    res_hidden_states_tuple=lowercase_,
                    deterministic=not train,
                )
            else:
                snake_case_ = up_block(lowercase_, temb=lowercase_, res_hidden_states_tuple=lowercase_, deterministic=not train)

        # 6. post-process — norm, activation, output conv, back to NCHW.
        snake_case_ = self.conv_norm_out(lowercase_)
        snake_case_ = nn.silu(lowercase_)
        snake_case_ = self.conv_out(lowercase_)
        snake_case_ = jnp.transpose(lowercase_, (0, 3, 1, 2))
        if not return_dict:
            return (sample,)
        return FlaxUNetaDConditionOutput(sample=lowercase_)
56
0
import requests
from bsa import BeautifulSoup


def UpperCamelCase(lowerCAmelCase__="https://www.worldometers.info/coronavirus"):
    """Scrape live COVID-19 counters from worldometers.

    Args:
        lowerCAmelCase__: page URL to scrape (defaults to the worldometers
            coronavirus page).

    Returns:
        dict mapping each counter's title (e.g. "Coronavirus Cases:") to its
        displayed value, both stripped of surrounding whitespace.
    """
    # An automated renaming pass had bound every local to `lowercase`, leaving
    # `soup`, `keys` and `values` undefined (NameError). Restored here.
    soup = BeautifulSoup(requests.get(lowerCAmelCase__).text, '''html.parser''')
    keys = soup.findAll('''h1''')
    values = soup.findAll('''div''', {'''class''': '''maincounter-number'''})
    keys += soup.findAll('''span''', {'''class''': '''panel-title'''})
    values += soup.findAll('''div''', {'''class''': '''number-table-main'''})
    # Titles and numbers appear in the same document order, so zip pairs them.
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


# The __main__ block calls `world_covidaa_stats`, which the renaming pass had
# left undefined — alias it to the real function.
world_covidaa_stats = UpperCamelCase


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covidaa_stats().items():
        print(F'{key}\n{value}\n')
101
'''Mosaic data augmentation: tile 4 random images (and their YOLO-format
bounding boxes) into one output image.'''
import glob
import os
import random
from string import ascii_lowercase, digits

import cva  # NOTE(review): presumably an aliased OpenCV (cv2) — confirm the import
import numpy as np

# Parameters.
# NOTE(review): the original constants had all been renamed to the single name
# `a`, so only the last one survived and `main` referenced undefined names.
# Restored with descriptive names.
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)
FILTER_TINY_SCALE = 1 / 100  # if height or width lower than this scale, drop it.
LABEL_DIR = ''
IMG_DIR = ''
OUTPUT_DIR = ''
NUMBER_IMAGES = 250


def main() -> None:
    """Generate NUMBER_IMAGES mosaic images plus matching YOLO label files."""
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit('.', 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cva.imwrite(f"{file_root}.jpg", new_image, [cva.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")

        # Convert corner boxes back to YOLO (class cx cy w h) text lines.
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", 'w') as outfile:
            outfile.write('\n'.join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Read YOLO label files and pair each with its image path.

    Returns (img_paths, labels) where each label is a list of
    [class_id, xmin, ymin, xmax, ymax] boxes in relative coordinates.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, '*.txt')):
        label_name = label_file.split(os.sep)[-1].rsplit('.', 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('\n').split(' ')
            # YOLO stores center/size; convert to corner coordinates.
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
) -> tuple[list, list, str]:
    """Paste 4 images into one mosaic and remap their boxes.

    The split point between the quadrants is drawn uniformly from
    ``scale_range``; boxes smaller than ``filter_scale`` (relative) after
    remapping are dropped. Returns (output_img, new_annos, first_image_path).
    """
    # NOTE(review): np.uinta is not a NumPy dtype; restored the intended uint8.
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cva.imread(path)
        if i == 0:  # top-left
            img = cva.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cva.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cva.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cva.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]


def random_chars(number_char: int) -> str:
    """Return a random lowercase-alphanumeric string of length ``number_char``."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print('DONE ✅')
56
0
"""simple docstring""" def lowercase ( _snake_case : str ) ->str: """simple docstring""" return " ".join(input_str.split()[::-1] ) if __name__ == "__main__": import doctest doctest.testmod()
102
'''simple docstring'''
import unittest

from transformers import (
    MODEL_FOR_OBJECT_DETECTION_MAPPING,
    AutoFeatureExtractor,
    AutoModelForObjectDetection,
    ObjectDetectionPipeline,
    is_vision_available,
    pipeline,
)
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_pytesseract,
    require_tf,
    require_timm,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


# NOTE(review): an automated renaming pass destroyed this file's identifiers:
# both classes are named `a`, every method is `A_`, assignments bind
# `snake_case_` while later lines read the original names (`object_detector`,
# `outputs`, ...), and several `def`s repeat the parameter name `lowercase_`
# (duplicate parameter names are a SyntaxError). Tokens preserved as-is.
if is_vision_available():
    from PIL import Image
else:
    class a:
        # Stand-in for PIL.Image when vision extras are not installed.
        @staticmethod
        def A_(*lowercase_: int, **lowercase_: str):
            pass


@is_pipeline_test
@require_vision
@require_timm
@require_torch
class a(unittest.TestCase):
    # Model mapping consumed by the common pipeline test harness.
    snake_case_ = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def A_(self: Any, lowercase_: List[Any], lowercase_: Optional[int], lowercase_: List[str]):
        """Harness hook: build the pipeline and the example inputs it will be run on."""
        snake_case_ = ObjectDetectionPipeline(model=lowercase_, image_processor=lowercase_)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def A_(self: Dict, lowercase_: List[Any], lowercase_: int):
        """Harness hook: run the pipeline and check every detection has score/label/box."""
        snake_case_ = object_detector('''./tests/fixtures/tests_samples/COCO/000000039769.png''', threshold=0.0)
        self.assertGreater(len(lowercase_), 0)
        for detected_object in outputs:
            self.assertEqual(
                lowercase_,
                {
                    '''score''': ANY(lowercase_),
                    '''label''': ANY(lowercase_),
                    '''box''': {'''xmin''': ANY(lowercase_), '''ymin''': ANY(lowercase_), '''xmax''': ANY(lowercase_), '''ymax''': ANY(lowercase_)},
                },
            )

        import datasets

        snake_case_ = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''', '''image''', split='''test''')
        snake_case_ = [
            Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png'''),
            '''http://images.cocodataset.org/val2017/000000039769.jpg''',
            # RGBA
            dataset[0]['''file'''],
            # LA
            dataset[1]['''file'''],
            # L
            dataset[2]['''file'''],
        ]
        snake_case_ = object_detector(lowercase_, threshold=0.0)
        self.assertEqual(len(lowercase_), len(lowercase_))
        for outputs in batch_outputs:
            self.assertGreater(len(lowercase_), 0)
            for detected_object in outputs:
                self.assertEqual(
                    lowercase_,
                    {
                        '''score''': ANY(lowercase_),
                        '''label''': ANY(lowercase_),
                        '''box''': {'''xmin''': ANY(lowercase_), '''ymin''': ANY(lowercase_), '''xmax''': ANY(lowercase_), '''ymax''': ANY(lowercase_)},
                    },
                )

    @require_tf
    @unittest.skip('''Object detection not implemented in TF''')
    def A_(self: int):
        """Placeholder: the pipeline has no TensorFlow implementation."""
        pass

    @require_torch
    def A_(self: Tuple):
        """Tiny DETR smoke test: exact (rounded) scores/boxes for single and batched input."""
        snake_case_ = '''hf-internal-testing/tiny-detr-mobilenetsv3'''
        snake_case_ = AutoModelForObjectDetection.from_pretrained(lowercase_)
        snake_case_ = AutoFeatureExtractor.from_pretrained(lowercase_)
        snake_case_ = ObjectDetectionPipeline(model=lowercase_, feature_extractor=lowercase_)
        snake_case_ = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''', threshold=0.0)
        self.assertEqual(
            nested_simplify(lowercase_, decimals=4),
            [
                {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
                {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
            ],
        )
        snake_case_ = object_detector(
            [
                '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                '''http://images.cocodataset.org/val2017/000000039769.jpg''',
            ],
            threshold=0.0,
        )
        self.assertEqual(
            nested_simplify(lowercase_, decimals=4),
            [
                [
                    {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
                    {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
                ],
                [
                    {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
                    {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
                ],
            ],
        )

    @require_torch
    @slow
    def A_(self: Optional[int]):
        """Full DETR ResNet-50 via explicit model/extractor construction."""
        snake_case_ = '''facebook/detr-resnet-50'''
        snake_case_ = AutoModelForObjectDetection.from_pretrained(lowercase_)
        snake_case_ = AutoFeatureExtractor.from_pretrained(lowercase_)
        snake_case_ = ObjectDetectionPipeline(model=lowercase_, feature_extractor=lowercase_)
        snake_case_ = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''')
        self.assertEqual(
            nested_simplify(lowercase_, decimals=4),
            [
                {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
                {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
                {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
                {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
                {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
            ],
        )
        snake_case_ = object_detector(
            [
                '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                '''http://images.cocodataset.org/val2017/000000039769.jpg''',
            ]
        )
        self.assertEqual(
            nested_simplify(lowercase_, decimals=4),
            [
                [
                    {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
                    {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
                    {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
                    {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
                    {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
                ],
                [
                    {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
                    {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
                    {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
                    {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
                    {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def A_(self: Tuple):
        """Same DETR ResNet-50 expectations, but constructed via pipeline()."""
        snake_case_ = '''facebook/detr-resnet-50'''
        snake_case_ = pipeline('''object-detection''', model=lowercase_)
        snake_case_ = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''')
        self.assertEqual(
            nested_simplify(lowercase_, decimals=4),
            [
                {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
                {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
                {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
                {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
                {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
            ],
        )
        snake_case_ = object_detector(
            [
                '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                '''http://images.cocodataset.org/val2017/000000039769.jpg''',
            ]
        )
        self.assertEqual(
            nested_simplify(lowercase_, decimals=4),
            [
                [
                    {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
                    {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
                    {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
                    {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
                    {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
                ],
                [
                    {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
                    {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
                    {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
                    {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
                    {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def A_(self: str):
        """A high threshold should keep only the two high-confidence cat detections."""
        snake_case_ = 0.9985
        snake_case_ = '''facebook/detr-resnet-50'''
        snake_case_ = pipeline('''object-detection''', model=lowercase_)
        snake_case_ = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''', threshold=lowercase_)
        self.assertEqual(
            nested_simplify(lowercase_, decimals=4),
            [
                {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
                {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
            ],
        )

    @require_torch
    @require_pytesseract
    @slow
    def A_(self: Dict):
        """LayoutLMv3 document detection (needs pytesseract for OCR) on an invoice image."""
        snake_case_ = '''Narsil/layoutlmv3-finetuned-funsd'''
        snake_case_ = 0.9993
        snake_case_ = pipeline('''object-detection''', model=lowercase_, threshold=lowercase_)
        snake_case_ = object_detector(
            '''https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png'''
        )
        self.assertEqual(
            nested_simplify(lowercase_, decimals=4),
            [
                {'''score''': 0.9993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}},
                {'''score''': 0.9993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}},
            ],
        )
56
0
# BLIP-2 configuration classes (vision tower, Q-Former, composite model).
#
# NOTE(review): this module appears machine-mangled — every positional parameter
# in the __init__ signatures is named ``A_`` (duplicate argument names are a
# SyntaxError), and assignments that presumably should set ``self.<attr>``
# instead bind a throwaway local ``lowerCAmelCase_``.  The code is reproduced
# as-is; comments flag the apparent intent.  TODO confirm against the upstream
# BLIP-2 configuration module before relying on any of it.
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING


A__ : Tuple = logging.get_logger(__name__)  # module-level logger

# checkpoint name -> hosted config URL
A__ : List[str] = {
    '''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''',
}


class __snake_case ( UpperCamelCase_ ):
    """Configuration of the BLIP-2 vision encoder (``model_type`` ``blip_2_vision_model``)."""

    _a = '''blip_2_vision_model'''

    def __init__( self : Tuple , A_ : Optional[int]=1_4_0_8 , A_ : Tuple=6_1_4_4 , A_ : Tuple=3_9 , A_ : Dict=1_6 , A_ : Tuple=2_2_4 , A_ : Any=1_4 , A_ : Dict="gelu" , A_ : Any=0.0_0001 , A_ : int=0.0 , A_ : Dict=1e-10 , A_ : Optional[Any]=True , **A_ : Tuple , ):
        # NOTE(review): duplicate ``A_`` parameter names; the defaults suggest
        # (hidden_size, intermediate_size, num_hidden_layers, num_attention_heads,
        # image_size, patch_size, hidden_act, layer_norm_eps, attention_dropout,
        # initializer_range, qkv_bias) — TODO confirm.
        super().__init__(**A_)
        # NOTE(review): each line below presumably intended ``self.<name> = <name>``.
        lowerCAmelCase_ : Tuple = hidden_size
        lowerCAmelCase_ : Dict = intermediate_size
        lowerCAmelCase_ : List[str] = num_hidden_layers
        lowerCAmelCase_ : Dict = num_attention_heads
        lowerCAmelCase_ : Dict = patch_size
        lowerCAmelCase_ : Dict = image_size
        lowerCAmelCase_ : str = initializer_range
        lowerCAmelCase_ : List[str] = attention_dropout
        lowerCAmelCase_ : Optional[int] = layer_norm_eps
        lowerCAmelCase_ : Union[str, Any] = hidden_act
        lowerCAmelCase_ : Tuple = qkv_bias

    @classmethod
    def UpperCAmelCase__ ( cls : str , A_ : Union[str, os.PathLike] , **A_ : List[str]):
        # Build this config from a checkpoint name/path; unwraps the nested
        # ``vision_config`` when the resolved dict is a composite blip-2 config.
        cls._set_token_in_kwargs(A_)
        lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = cls.get_config_dict(A_ , **A_)
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get('''model_type''') == "blip-2":
            lowerCAmelCase_ : Optional[Any] = config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
        return cls.from_dict(A_ , **A_)


class __snake_case ( UpperCamelCase_ ):
    """Configuration of the BLIP-2 Q-Former (``model_type`` ``blip_2_qformer``)."""

    _a = '''blip_2_qformer'''

    def __init__( self : List[str] , A_ : List[str]=3_0_5_2_2 , A_ : Union[str, Any]=7_6_8 , A_ : Optional[int]=1_2 , A_ : Optional[Any]=1_2 , A_ : str=3_0_7_2 , A_ : Tuple="gelu" , A_ : Union[str, Any]=0.1 , A_ : Optional[Any]=0.1 , A_ : Any=5_1_2 , A_ : Optional[Any]=0.02 , A_ : List[Any]=1e-12 , A_ : Optional[int]=0 , A_ : Dict="absolute" , A_ : List[Any]=2 , A_ : Union[str, Any]=1_4_0_8 , **A_ : Optional[int] , ):
        # NOTE(review): duplicate ``A_`` parameter names (SyntaxError) — the RHS
        # names below show the intended parameter list.
        super().__init__(pad_token_id=A_ , **A_)
        lowerCAmelCase_ : Dict = vocab_size
        lowerCAmelCase_ : str = hidden_size
        lowerCAmelCase_ : Union[str, Any] = num_hidden_layers
        lowerCAmelCase_ : Tuple = num_attention_heads
        lowerCAmelCase_ : List[str] = hidden_act
        lowerCAmelCase_ : Optional[int] = intermediate_size
        lowerCAmelCase_ : Any = hidden_dropout_prob
        lowerCAmelCase_ : int = attention_probs_dropout_prob
        lowerCAmelCase_ : str = max_position_embeddings
        lowerCAmelCase_ : Union[str, Any] = initializer_range
        lowerCAmelCase_ : Union[str, Any] = layer_norm_eps
        lowerCAmelCase_ : List[Any] = position_embedding_type
        lowerCAmelCase_ : Optional[Any] = cross_attention_frequency
        lowerCAmelCase_ : Dict = encoder_hidden_size

    @classmethod
    def UpperCAmelCase__ ( cls : Dict , A_ : Union[str, os.PathLike] , **A_ : Tuple):
        # Same pattern as the vision config: unwrap ``qformer_config`` when
        # loading from a composite blip-2 config dict.
        cls._set_token_in_kwargs(A_)
        lowerCAmelCase_ , lowerCAmelCase_ : str = cls.get_config_dict(A_ , **A_)
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get('''model_type''') == "blip-2":
            lowerCAmelCase_ : List[Any] = config_dict['''qformer_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
        return cls.from_dict(A_ , **A_)


class __snake_case ( UpperCamelCase_ ):
    """Composite BLIP-2 configuration bundling vision, Q-Former, and text configs."""

    _a = '''blip-2'''
    _a = True  # presumably ``is_composition`` — TODO confirm

    def __init__( self : str , A_ : str=None , A_ : Dict=None , A_ : List[str]=None , A_ : str=3_2 , **A_ : Any):
        # NOTE(review): parameters presumably (vision_config, qformer_config,
        # text_config, num_query_tokens); the None checks below reference those
        # names even though the signature only declares ``A_``.
        super().__init__(**A_)
        if vision_config is None:
            lowerCAmelCase_ : Any = {}
            logger.info('''vision_config is None. initializing the Blip2VisionConfig with default values.''')
        if qformer_config is None:
            lowerCAmelCase_ : List[str] = {}
            logger.info('''qformer_config is None. Initializing the Blip2QFormerConfig with default values.''')
        if text_config is None:
            lowerCAmelCase_ : Any = {}
            logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''')
        lowerCAmelCase_ : Union[str, Any] = BlipaVisionConfig(**A_)
        lowerCAmelCase_ : int = BlipaQFormerConfig(**A_)
        # text backbone defaults to OPT when no model_type is given
        lowerCAmelCase_ : Any = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
        lowerCAmelCase_ : Dict = CONFIG_MAPPING[text_model_type](**A_)
        lowerCAmelCase_ : List[str] = self.text_config.tie_word_embeddings
        lowerCAmelCase_ : Optional[Any] = self.text_config.is_encoder_decoder
        lowerCAmelCase_ : Tuple = num_query_tokens
        # Q-Former cross-attends into vision features of this width
        lowerCAmelCase_ : Tuple = self.vision_config.hidden_size
        lowerCAmelCase_ : str = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        lowerCAmelCase_ : List[Any] = 1.0
        lowerCAmelCase_ : List[Any] = 0.02

    @classmethod
    def UpperCAmelCase__ ( cls : Dict , A_ : BlipaVisionConfig , A_ : BlipaQFormerConfig , A_ : PretrainedConfig , **A_ : List[Any] , ):
        # Alternate constructor from already-built sub-configs.
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **A_ , )

    def UpperCAmelCase__ ( self : Tuple):
        # Serialize to a plain dict, expanding nested sub-configs.
        lowerCAmelCase_ : List[Any] = copy.deepcopy(self.__dict__)
        lowerCAmelCase_ : Union[str, Any] = self.vision_config.to_dict()
        lowerCAmelCase_ : List[Any] = self.qformer_config.to_dict()
        lowerCAmelCase_ : Any = self.text_config.to_dict()
        lowerCAmelCase_ : Any = self.__class__.model_type
        return output
103
'''Unit tests for the MPNet model family (model tester + pipeline/integration tests).

NOTE(review): this file appears machine-mangled — every method is named ``A_``
(so later definitions shadow earlier ones), parameter lists repeat ``lowercase_``
(a SyntaxError), and results are bound to a throwaway ``snake_case_`` local while
later lines read the names the assignments presumably intended (``model``,
``result``, ``config_and_inputs`` ...).  Reproduced as-is with comments flagging
apparent intent; confirm against the upstream MPNet test module.
'''

import unittest

from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MPNetForMaskedLM,
        MPNetForMultipleChoice,
        MPNetForQuestionAnswering,
        MPNetForSequenceClassification,
        MPNetForTokenClassification,
        MPNetModel,
    )


class a :
    # Apparent intent: MPNetModelTester — builds tiny configs/inputs and checks
    # output shapes for each head.  TODO confirm.
    def __init__( self : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Any=13 , lowercase_ : Optional[Any]=7 , lowercase_ : Optional[Any]=True , lowercase_ : Dict=True , lowercase_ : Tuple=False , lowercase_ : Optional[Any]=True , lowercase_ : Any=99 , lowercase_ : Union[str, Any]=64 , lowercase_ : str=5 , lowercase_ : int=4 , lowercase_ : List[Any]=64 , lowercase_ : Dict="gelu" , lowercase_ : Optional[int]=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : Tuple=512 , lowercase_ : List[Any]=16 , lowercase_ : str=2 , lowercase_ : List[str]=0.02 , lowercase_ : Optional[Any]=3 , lowercase_ : Optional[Any]=4 , lowercase_ : List[Any]=None , ):
        # NOTE(review): duplicate ``lowercase_`` names; each line presumably
        # intended ``self.<name> = <name>``.
        snake_case_ = parent
        snake_case_ = batch_size
        snake_case_ = seq_length
        snake_case_ = is_training
        snake_case_ = use_input_mask
        snake_case_ = use_token_type_ids
        snake_case_ = use_labels
        snake_case_ = vocab_size
        snake_case_ = hidden_size
        snake_case_ = num_hidden_layers
        snake_case_ = num_attention_heads
        snake_case_ = intermediate_size
        snake_case_ = hidden_act
        snake_case_ = hidden_dropout_prob
        snake_case_ = attention_probs_dropout_prob
        snake_case_ = max_position_embeddings
        snake_case_ = type_vocab_size
        snake_case_ = type_sequence_label_size
        snake_case_ = initializer_range
        snake_case_ = num_labels
        snake_case_ = num_choices
        snake_case_ = scope

    # Apparent intent: get_large_model_config.
    def A_ ( self : List[str] ):
        return MPNetConfig.from_pretrained('''microsoft/mpnet-base''' )

    # Apparent intent: prepare_config_and_inputs.
    def A_ ( self : str ):
        snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        snake_case_ = None
        if self.use_input_mask:
            snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
        snake_case_ = None
        snake_case_ = None
        snake_case_ = None
        if self.use_labels:
            snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            snake_case_ = ids_tensor([self.batch_size] , self.num_choices )
        snake_case_ = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    # Apparent intent: get_config.
    def A_ ( self : Tuple ):
        return MPNetConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )

    # Apparent intent: create_and_check_mpnet_model — checks base-model shapes.
    def A_ ( self : Any , lowercase_ : Dict , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Optional[int] ):
        snake_case_ = MPNetModel(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        snake_case_ = model(lowercase_ , lowercase_ )
        snake_case_ = model(lowercase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    # Apparent intent: create_and_check_mpnet_for_question_answering.
    def A_ ( self : str , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : str , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : Optional[int] ):
        snake_case_ = MPNetForQuestionAnswering(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        snake_case_ = model(
            lowercase_ , attention_mask=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    # Apparent intent: create_and_check_mpnet_for_sequence_classification.
    def A_ ( self : Tuple , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Any ):
        snake_case_ = self.num_labels
        snake_case_ = MPNetForSequenceClassification(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        snake_case_ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    # Apparent intent: create_and_check_mpnet_for_multiple_choice — inputs are
    # expanded along a num_choices axis before the forward pass.
    def A_ ( self : Any , lowercase_ : Any , lowercase_ : str , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : Dict ):
        snake_case_ = self.num_choices
        snake_case_ = MPNetForMultipleChoice(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        snake_case_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        snake_case_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        snake_case_ = model(
            lowercase_ , attention_mask=lowercase_ , labels=lowercase_ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    # Apparent intent: create_and_check_mpnet_for_token_classification.
    def A_ ( self : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : int , lowercase_ : List[str] ):
        snake_case_ = self.num_labels
        snake_case_ = MPNetForTokenClassification(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        snake_case_ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    # Apparent intent: prepare_config_and_inputs_for_common.
    def A_ ( self : Union[str, Any] ):
        snake_case_ = self.prepare_config_and_inputs()
        ((snake_case_) ,(snake_case_) ,(snake_case_) ,(snake_case_) ,(snake_case_) ,(snake_case_)) = config_and_inputs
        snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict


@require_torch
class a ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
    # Apparent intent: MPNetModelTest mixing ModelTesterMixin/PipelineTesterMixin.
    snake_case_ = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    snake_case_ = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    snake_case_ = False
    snake_case_ = True

    # Apparent intent: setUp — builds the model tester and a ConfigTester.
    def A_ ( self : Tuple ):
        snake_case_ = MPNetModelTester(self )
        snake_case_ = ConfigTester(self , config_class=lowercase_ , hidden_size=37 )

    def A_ ( self : Union[str, Any] ):
        self.config_tester.run_common_tests()

    def A_ ( self : Tuple ):
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*lowercase_ )

    def A_ ( self : List[Any] ):
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*lowercase_ )

    def A_ ( self : List[Any] ):
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*lowercase_ )

    def A_ ( self : Union[str, Any] ):
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*lowercase_ )

    def A_ ( self : Tuple ):
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*lowercase_ )


@require_torch
class a ( unittest.TestCase ):
    # Integration test against the real microsoft/mpnet-base checkpoint.
    @slow
    def A_ ( self : List[Any] ):
        snake_case_ = MPNetModel.from_pretrained('''microsoft/mpnet-base''' )
        snake_case_ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        snake_case_ = model(lowercase_ )[0]
        snake_case_ = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , lowercase_ )
        snake_case_ = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase_ , atol=1e-4 ) )
56
0
'''simple docstring''' def _A ( A__ , A__ ): """simple docstring""" if a < 0 or b < 0: raise ValueError('''the value of both inputs must be positive''' ) __lowercase = str(bin(A__ ) )[2:] # remove the leading "0b" __lowercase = str(bin(A__ ) )[2:] __lowercase = max(len(A__ ) , len(A__ ) ) return "0b" + "".join( str(int('''1''' in (char_a, char_b) ) ) for char_a, char_b in zip(a_binary.zfill(A__ ) , b_binary.zfill(A__ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
104
'''Unit tests for the RAG retriever (canonical, custom, and legacy FAISS indexes).

NOTE(review): this file appears machine-mangled — every method is named ``A_``
(later definitions shadow earlier ones), and results are bound to a throwaway
``snake_case_`` local while later lines read the intended names
(``retriever``, ``dataset``, ``vocab_tokens`` ...).  Reproduced as-is with
comments flagging apparent intent; ``np.floataa`` also looks mangled
(presumably ``np.float32``) — confirm against the upstream RAG retrieval tests.
'''

import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch

import numpy as np
from datasets import Dataset

from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch


if is_faiss_available():
    import faiss


@require_faiss
class a ( _lowerCamelCase ):
    # Apparent intent: setUp — writes tiny DPR and BART tokenizer fixture files
    # into a temp dir; ``8`` is presumably ``self.retrieval_vector_size``.
    def A_ ( self : str ):
        snake_case_ = tempfile.mkdtemp()
        snake_case_ = 8
        # DPR tok
        snake_case_ = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        snake_case_ = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
        os.makedirs(lowercase_ , exist_ok=lowercase_ )
        snake_case_ = os.path.join(lowercase_ , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        # BART tok
        snake_case_ = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''<unk>''',
        ]
        snake_case_ = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
        snake_case_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        snake_case_ = {'''unk_token''': '''<unk>'''}
        snake_case_ = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
        os.makedirs(lowercase_ , exist_ok=lowercase_ )
        snake_case_ = os.path.join(lowercase_ , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
        snake_case_ = os.path.join(lowercase_ , BART_VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(lowercase_ ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(lowercase_ ) )

    # Apparent intent: get_dpr_tokenizer.
    def A_ ( self : Union[str, Any] ):
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )

    # Apparent intent: get_dpr_ctx_encoder_tokenizer.
    def A_ ( self : Union[str, Any] ):
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )

    # Apparent intent: get_bart_tokenizer.
    def A_ ( self : int ):
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )

    # Apparent intent: tearDown — remove the fixture temp dir.
    def A_ ( self : str ):
        shutil.rmtree(self.tmpdirname )

    # Apparent intent: get_dummy_dataset — two docs with constant embeddings and
    # an inner-product FAISS index.
    def A_ ( self : str ):
        snake_case_ = Dataset.from_dict(
            {
                '''id''': ['''0''', '''1'''],
                '''text''': ['''foo''', '''bar'''],
                '''title''': ['''Foo''', '''Bar'''],
                '''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
            } )
        dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
        return dataset

    # Apparent intent: get_dummy_canonical_hf_index_retriever — patches
    # load_dataset so the retriever sees the dummy dataset.
    def A_ ( self : str ):
        snake_case_ = self.get_dummy_dataset()
        snake_case_ = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
        with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
            snake_case_ = dataset
            snake_case_ = RagRetriever(
                lowercase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        return retriever

    # Apparent intent: get_dummy_custom_hf_index_retriever — either saved to disk
    # and reloaded, or wrapped in an in-memory CustomHFIndex.
    def A_ ( self : str , lowercase_ : bool ):
        snake_case_ = self.get_dummy_dataset()
        snake_case_ = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
        if from_disk:
            snake_case_ = os.path.join(self.tmpdirname , '''dataset''' )
            snake_case_ = os.path.join(self.tmpdirname , '''index.faiss''' )
            dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
            dataset.drop_index('''embeddings''' )
            dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
            del dataset
            snake_case_ = RagRetriever(
                lowercase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        else:
            snake_case_ = RagRetriever(
                lowercase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , lowercase_ ) , )
        return retriever

    # Apparent intent: get_dummy_legacy_index_retriever — pickled passages plus a
    # saved FAISS index, as the pre-datasets "legacy" layout.
    def A_ ( self : Tuple ):
        snake_case_ = Dataset.from_dict(
            {
                '''id''': ['''0''', '''1'''],
                '''text''': ['''foo''', '''bar'''],
                '''title''': ['''Foo''', '''Bar'''],
                '''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
            } )
        dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
        snake_case_ = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
        dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
        pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
        snake_case_ = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
        snake_case_ = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
        pickle.dump(lowercase_ , open(lowercase_ , '''wb''' ) )
        snake_case_ = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
        snake_case_ = RagRetriever(
            lowercase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
        return retriever

    # Apparent intent: canonical index retrieve test.
    def A_ ( self : Optional[Any] ):
        snake_case_ = 1
        snake_case_ = self.get_dummy_canonical_hf_index_retriever()
        snake_case_ = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        snake_case_ ,snake_case_ ,snake_case_ = retriever.retrieve(lowercase_ , n_docs=lowercase_ )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(lowercase_ ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
        self.assertEqual(len(doc_dicts[0]['''id'''] ) , lowercase_ )
        self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    # Apparent intent: canonical index save_pretrained/from_pretrained round-trip.
    def A_ ( self : str ):
        snake_case_ = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
                snake_case_ = self.get_dummy_dataset()
                retriever.save_pretrained(lowercase_ )
            snake_case_ = RagRetriever.from_pretrained(lowercase_ )
            self.assertIsInstance(lowercase_ , lowercase_ )
            snake_case_ = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
            snake_case_ = retriever.retrieve(lowercase_ , n_docs=1 )
            self.assertTrue(out is not None )

    # Apparent intent: custom (in-memory) index retrieve test.
    def A_ ( self : int ):
        snake_case_ = 1
        snake_case_ = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ )
        snake_case_ = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        snake_case_ ,snake_case_ ,snake_case_ = retriever.retrieve(lowercase_ , n_docs=lowercase_ )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(lowercase_ ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
        self.assertEqual(len(doc_dicts[0]['''id'''] ) , lowercase_ )
        self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    # Apparent intent: custom index save/load round-trip.
    def A_ ( self : int ):
        snake_case_ = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(lowercase_ )
            snake_case_ = RagRetriever.from_pretrained(lowercase_ )
            self.assertIsInstance(lowercase_ , lowercase_ )
            snake_case_ = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
            snake_case_ = retriever.retrieve(lowercase_ , n_docs=1 )
            self.assertTrue(out is not None )

    # Apparent intent: custom on-disk index retrieve test.
    def A_ ( self : str ):
        snake_case_ = 1
        snake_case_ = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ )
        snake_case_ = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        snake_case_ ,snake_case_ ,snake_case_ = retriever.retrieve(lowercase_ , n_docs=lowercase_ )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(lowercase_ ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
        self.assertEqual(len(doc_dicts[0]['''id'''] ) , lowercase_ )
        self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    # Apparent intent: custom on-disk index save/load round-trip.
    def A_ ( self : Any ):
        snake_case_ = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(lowercase_ )
            snake_case_ = RagRetriever.from_pretrained(lowercase_ )
            self.assertIsInstance(lowercase_ , lowercase_ )
            snake_case_ = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
            snake_case_ = retriever.retrieve(lowercase_ , n_docs=1 )
            self.assertTrue(out is not None )

    # Apparent intent: legacy index retrieve test — note legacy doc dicts expose
    # only text/title.
    def A_ ( self : Any ):
        snake_case_ = 1
        snake_case_ = self.get_dummy_legacy_index_retriever()
        snake_case_ = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        snake_case_ ,snake_case_ ,snake_case_ = retriever.retrieve(lowercase_ , n_docs=lowercase_ )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(lowercase_ ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
        self.assertEqual(len(doc_dicts[0]['''text'''] ) , lowercase_ )
        self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    # Apparent intent: legacy index save/load round-trip.
    def A_ ( self : int ):
        snake_case_ = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(lowercase_ )
            snake_case_ = RagRetriever.from_pretrained(lowercase_ )
            self.assertIsInstance(lowercase_ , lowercase_ )
            snake_case_ = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
            snake_case_ = retriever.retrieve(lowercase_ , n_docs=1 )
            self.assertTrue(out is not None )

    # Apparent intent: __call__ postprocessing test — lists in, then tensors in
    # with return_tensors='pt'.
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def A_ ( self : List[str] ):
        import torch

        snake_case_ = 1
        snake_case_ = self.get_dummy_canonical_hf_index_retriever()
        snake_case_ = [[5, 7], [10, 11]]
        snake_case_ = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        snake_case_ = retriever(lowercase_ , lowercase_ , prefix=retriever.config.generator.prefix , n_docs=lowercase_ )
        snake_case_ ,snake_case_ ,snake_case_ = (
            out['''context_input_ids'''],
            out['''context_attention_mask'''],
            out['''retrieved_doc_embeds'''],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(lowercase_ , lowercase_ )
        self.assertIsInstance(lowercase_ , lowercase_ )
        self.assertIsInstance(lowercase_ , np.ndarray )
        snake_case_ = retriever(
            lowercase_ , lowercase_ , prefix=retriever.config.generator.prefix , n_docs=lowercase_ , return_tensors='''pt''' , )
        snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ = (  # noqa: F841
            out['''context_input_ids'''],
            out['''context_attention_mask'''],
            out['''retrieved_doc_embeds'''],
            out['''doc_ids'''],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(lowercase_ , torch.Tensor )
        self.assertIsInstance(lowercase_ , torch.Tensor )
        self.assertIsInstance(lowercase_ , torch.Tensor )

    # Apparent intent: retriever with a context-encoder tokenizer attached
    # returns tokenized docs as two extra output keys.
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def A_ ( self : Tuple ):
        snake_case_ = self.get_dpr_ctx_encoder_tokenizer()
        snake_case_ = 1
        snake_case_ = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ )
        retriever.set_ctx_encoder_tokenizer(lowercase_ )
        snake_case_ = [[5, 7], [10, 11]]
        snake_case_ = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        snake_case_ = retriever(lowercase_ , lowercase_ , prefix=retriever.config.generator.prefix , n_docs=lowercase_ )
        self.assertEqual(
            len(lowercase_ ) , 6 )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , lowercase_ )  # check for doc token related keys in dictionary.
56
0
"""simple docstring""" from collections import defaultdict class __UpperCamelCase : def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> int: a : Optional[int] = total # total no of tasks (N) # DP table will have a dimension of (2^M)*N # initially all values are set to -1 a : Dict = [ [-1 for i in range(total + 1 )] for j in range(2 ** len(lowerCAmelCase__ ) ) ] a : int = defaultdict(lowerCAmelCase__ ) # stores the list of persons for each task # final_mask is used to check if all persons are included by setting all bits # to 1 a : List[str] = (1 << len(lowerCAmelCase__ )) - 1 def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict: # if mask == self.finalmask all persons are distributed tasks, return 1 if mask == self.final_mask: return 1 # if not everyone gets the task and no more tasks are available, return 0 if task_no > self.total_tasks: return 0 # if case already considered if self.dp[mask][task_no] != -1: return self.dp[mask][task_no] # Number of ways when we don't this task in the arrangement a : str = self.count_ways_until(lowerCAmelCase__ , task_no + 1 ) # now assign the tasks one by one to all possible persons and recursively # assign for the remaining tasks. if task_no in self.task: for p in self.task[task_no]: # if p is already given a task if mask & (1 << p): continue # assign this task to p and change the mask value. And recursively # assign tasks with the new mask value. total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 ) # save the value. 
a : List[str] = total_ways_util return self.dp[mask][task_no] def __a ( self , lowerCAmelCase__ ) -> Optional[Any]: # Store the list of persons for each task for i in range(len(lowerCAmelCase__ ) ): for j in task_performed[i]: self.task[j].append(lowerCAmelCase__ ) # call the function to fill the DP table, final answer is stored in dp[0][1] return self.count_ways_until(0 , 1 ) if __name__ == "__main__": a : List[Any] = 5 # total no of tasks (the value of N) # the list of tasks that can be done by M persons. a : List[Any] = [[1, 3, 4], [1, 2, 5], [3, 4]] print( AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways( task_performed ) )
105
"""Fast (Rust-backed) tokenizer for T5 models, with sentinel "extra id" tokens."""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_ta import TaTokenizer
else:
    # sentencepiece missing: no slow tokenizer to convert from / save to.
    TaTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
    },
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}


class TaTokenizerFast(PreTrainedTokenizerFast):
    """Fast T5 tokenizer.

    Appends EOS to every sequence and manages the ``<extra_id_N>`` sentinel
    tokens used by T5's span-corruption objective.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = TaTokenizer

    # No tokens are prepended; EOS is appended in build_inputs_with_special_tokens.
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list when the caller did not
        # provide an explicit list of additional special tokens.
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # We can only rebuild the slow tokenizer when the sentencepiece model is known.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        """Keep the (deprecated) hard-coded max length for known checkpoints, warning about the v5 change."""
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece vocab file into ``save_directory``; requires a known vocab_file."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None
    ) -> List[int]:
        """Append EOS to each sequence: ``X </s>`` or ``A </s> B </s>``."""
        token_ids_a = token_ids_a + [self.eos_token_id]
        if token_ids_a_pair is None:
            return self.prefix_tokens + token_ids_a
        else:
            token_ids_a_pair = token_ids_a_pair + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_a_pair

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None
    ) -> List[int]:
        """T5 does not use token type ids: return a zero mask covering both sequences plus EOS."""
        eos = [self.eos_token_id]

        if token_ids_a_pair is None:
            return len(token_ids_a + eos) * [0]
        return len(token_ids_a + eos + token_ids_a_pair + eos) * [0]

    def get_sentinel_tokens(self):
        """Return the ``<extra_id_N>`` sentinel tokens registered as additional special tokens."""
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        """Return the vocabulary ids of the sentinel tokens."""
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
56
0
"""simple docstring""" from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent __UpperCamelCase : Any = {'''UserAgent''': UserAgent().random} def __SCREAMING_SNAKE_CASE ( A_ ): lowerCAmelCase__ : int = script.contents[0] lowerCAmelCase__ : Tuple = json.loads(data[data.find('''{"config"''' ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : Any ,lowercase_ : Dict ): lowerCAmelCase__ : Any = F'https://www.instagram.com/{username}/' lowerCAmelCase__ : List[str] = self.get_json() def __lowerCAmelCase ( self : Optional[int] ): lowerCAmelCase__ : int = requests.get(self.url ,headers=lowercase_ ).text lowerCAmelCase__ : Optional[Any] = BeautifulSoup(lowercase_ ,'''html.parser''' ).find_all('''script''' ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self : Optional[Any] ): return F'{self.__class__.__name__}(\'{self.username}\')' def __str__( self : Union[str, Any] ): return F'{self.fullname} ({self.username}) is {self.biography}' @property def __lowerCAmelCase ( self : Optional[Any] ): return self.user_data["username"] @property def __lowerCAmelCase ( self : int ): return self.user_data["full_name"] @property def __lowerCAmelCase ( self : Optional[int] ): return self.user_data["biography"] @property def __lowerCAmelCase ( self : List[str] ): return self.user_data["business_email"] @property def __lowerCAmelCase ( self : Optional[Any] ): return self.user_data["external_url"] @property def __lowerCAmelCase ( self : Tuple ): return self.user_data["edge_followed_by"]["count"] @property def __lowerCAmelCase ( self : Any ): return self.user_data["edge_follow"]["count"] @property def __lowerCAmelCase ( self : Dict ): return self.user_data["edge_owner_to_timeline_media"]["count"] @property def __lowerCAmelCase ( 
self : List[str] ): return self.user_data["profile_pic_url_hd"] @property def __lowerCAmelCase ( self : str ): return self.user_data["is_verified"] @property def __lowerCAmelCase ( self : Optional[int] ): return self.user_data["is_private"] def __SCREAMING_SNAKE_CASE ( A_ = "github" ): import os if os.environ.get('''CI''' ): return # test failing on GitHub Actions lowerCAmelCase__ : int = InstagramUser(A_ ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , A_ ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." assert instagram_user.number_of_posts > 1_50 assert instagram_user.number_of_followers > 12_00_00 assert instagram_user.number_of_followings > 15 assert instagram_user.email == "support@github.com" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith('''https://instagram.''' ) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() __UpperCamelCase : Any = InstagramUser('''github''') print(instagram_user) print(F'''{instagram_user.number_of_posts = }''') print(F'''{instagram_user.number_of_followers = }''') print(F'''{instagram_user.number_of_followings = }''') print(F'''{instagram_user.email = }''') print(F'''{instagram_user.website = }''') print(F'''{instagram_user.profile_picture_url = }''') print(F'''{instagram_user.is_verified = }''') print(F'''{instagram_user.is_private = }''')
106
'''simple docstring''' from __future__ import annotations import math def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> int: '''simple docstring''' if depth < 0: raise ValueError('''Depth cannot be less than 0''' ) if len(__UpperCAmelCase ) == 0: raise ValueError('''Scores cannot be empty''' ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1, node_index * 2, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ), minimax(depth + 1, node_index * 2 + 1, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ), ) return min( minimax(depth + 1, node_index * 2, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ), minimax(depth + 1, node_index * 2 + 1, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ), ) def __magic_name__ ( ) -> None: '''simple docstring''' snake_case_ = [90, 23, 6, 33, 21, 65, 123, 3_4423] snake_case_ = math.log(len(__UpperCAmelCase ), 2 ) print('''Optimal value : ''', end='''''' ) print(minimax(0, 0, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
56
0
"""Lazy-loading ``__init__`` for the ResNet model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


# Maps submodule name -> the public names it exports; consumed by _LazyModule.
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}

# Each backend's modeling module is only registered when the backend is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
107
"""Convert a fairseq Speech2Text checkpoint (.pt) into a Transformers
``SpeechaTextForConditionalGeneration`` checkpoint.

The state-dict helpers operate on plain ``dict``s and are usable without
``transformers`` installed; the heavy import happens lazily inside
``convert_fairseq_sat_checkpoint_to_tfms``.
"""
import argparse

import torch
from torch import nn


def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping entries that have no Transformers equivalent (in place)."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_keys(s_dict):
    """Rename fairseq parameter names to the Transformers naming scheme (in place)."""
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            # fairseq "transformer_layers" blocks map onto HF "layers".
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            # the convolutional subsampler is called "conv" on the HF side
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    """Build a bias-free Linear layer sharing the embedding weights (tied LM head)."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_sat_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Load the fairseq checkpoint, remap its weights and save a HF model.

    Args:
        checkpoint_path: path to the fairseq ``.pt`` file.
        pytorch_dump_folder_path: output directory for ``save_pretrained``.
    """
    # Imported lazily so the pure state-dict helpers above stay usable
    # without transformers installed.
    from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration

    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = checkpoint["args"]
    state_dict = checkpoint["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = SpeechaTextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = SpeechaTextForConditionalGeneration(config)
    # Positional-embedding weights are recomputed, so they may legitimately be missing.
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    cli_args = parser.parse_args()
    convert_fairseq_sat_checkpoint_to_tfms(cli_args.fairseq_path, cli_args.pytorch_dump_folder_path)
56
0
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import TensorType, logging if TYPE_CHECKING: from ...onnx.config import PatchingSpec from ...tokenization_utils_base import PreTrainedTokenizerBase lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''', '''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''', '''allenai/longformer-large-4096-finetuned-triviaqa''': ( '''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json''' ), '''allenai/longformer-base-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json''' ), '''allenai/longformer-large-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json''' ), } class SCREAMING_SNAKE_CASE__ ( lowercase ): """simple docstring""" a : Optional[int] ="longformer" def __init__( self , snake_case__ = 512 , snake_case__ = 2 , snake_case__ = 1 , snake_case__ = 0 , snake_case__ = 2 , snake_case__ = 30_522 , snake_case__ = 768 , snake_case__ = 12 , snake_case__ = 12 , snake_case__ = 3_072 , snake_case__ = "gelu" , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 512 , snake_case__ = 2 , snake_case__ = 0.02 , snake_case__ = 1e-12 , snake_case__ = False , **snake_case__ , ): """simple docstring""" super().__init__(pad_token_id=snake_case__ , **snake_case__ ) lowerCAmelCase : Union[str, Any] = attention_window lowerCAmelCase : List[Any] = sep_token_id lowerCAmelCase : Dict = bos_token_id lowerCAmelCase : int = eos_token_id lowerCAmelCase : Dict = vocab_size 
lowerCAmelCase : Dict = hidden_size lowerCAmelCase : Optional[Any] = num_hidden_layers lowerCAmelCase : Tuple = num_attention_heads lowerCAmelCase : Union[str, Any] = hidden_act lowerCAmelCase : Optional[int] = intermediate_size lowerCAmelCase : Dict = hidden_dropout_prob lowerCAmelCase : int = attention_probs_dropout_prob lowerCAmelCase : Tuple = max_position_embeddings lowerCAmelCase : Optional[int] = type_vocab_size lowerCAmelCase : Any = initializer_range lowerCAmelCase : Union[str, Any] = layer_norm_eps lowerCAmelCase : List[str] = onnx_export class SCREAMING_SNAKE_CASE__ ( lowercase ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ = "default" , snake_case__ = None ): """simple docstring""" super().__init__(snake_case__ , snake_case__ , snake_case__ ) lowerCAmelCase : Any = True @property def lowercase__ ( self ): """simple docstring""" if self.task == "multiple-choice": lowerCAmelCase : Tuple = {0: "batch", 1: "choice", 2: "sequence"} else: lowerCAmelCase : Any = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("global_attention_mask", dynamic_axis), ] ) @property def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : Tuple = super().outputs if self.task == "default": lowerCAmelCase : Tuple = {0: "batch"} return outputs @property def lowercase__ ( self ): """simple docstring""" return 1e-4 @property def lowercase__ ( self ): """simple docstring""" return max(super().default_onnx_opset , 14 ) def lowercase__ ( self , snake_case__ , snake_case__ = -1 , snake_case__ = -1 , snake_case__ = False , snake_case__ = None , ): """simple docstring""" lowerCAmelCase : Dict = super().generate_dummy_inputs( preprocessor=snake_case__ , batch_size=snake_case__ , seq_length=snake_case__ , is_pair=snake_case__ , framework=snake_case__ ) import torch # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, 
dtype=torch.int64) # makes the export fail randomly lowerCAmelCase : Dict = torch.zeros_like(inputs["input_ids"] ) # make every second token global lowerCAmelCase : List[Any] = 1 return inputs
108
"""Dummy placeholder raising an informative ImportError when the optional
backends ``transformers``, ``torch`` and ``note_seq`` are not installed."""
from ..utils import DummyObject, requires_backends


class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    # The DummyObject metaclass consults `_backends` to build the error message.
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
56
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices A: Optional[int] = logging.get_logger(__name__) A: List[Any] = { "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json", } class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , UpperCAmelCase__ ): __lowerCAmelCase : Optional[int] = 'bit' __lowerCAmelCase : Union[str, Any] = ['preactivation', 'bottleneck'] __lowerCAmelCase : Union[str, Any] = ['SAME', 'VALID'] def __init__( self , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=[256, 512, 1024, 2048] , _SCREAMING_SNAKE_CASE=[3, 4, 6, 3] , _SCREAMING_SNAKE_CASE="preactivation" , _SCREAMING_SNAKE_CASE="relu" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ) -> int: '''simple docstring''' super().__init__(**_SCREAMING_SNAKE_CASE ) if layer_type not in self.layer_types: raise ValueError(F"layer_type={layer_type} is not one of {','.join(self.layer_types )}" ) if global_padding is not None: if global_padding.upper() in self.supported_padding: UpperCAmelCase : List[str] = global_padding.upper() else: raise ValueError(F"Padding strategy {global_padding} not supported" ) UpperCAmelCase : Optional[Any] = num_channels UpperCAmelCase : Optional[int] = embedding_size UpperCAmelCase : List[str] = hidden_sizes UpperCAmelCase : int = depths UpperCAmelCase : Tuple = layer_type UpperCAmelCase : Tuple = hidden_act UpperCAmelCase : Optional[int] = global_padding UpperCAmelCase : Any = num_groups UpperCAmelCase : Any = drop_path_rate UpperCAmelCase : List[str] = embedding_dynamic_padding UpperCAmelCase : Dict = output_stride UpperCAmelCase : Union[str, Any] = width_factor UpperCAmelCase 
: Tuple = ["""stem"""] + [F"stage{idx}" for idx in range(1 , len(_SCREAMING_SNAKE_CASE ) + 1 )] UpperCAmelCase , UpperCAmelCase : Optional[Any] = get_aligned_output_features_output_indices( out_features=_SCREAMING_SNAKE_CASE , out_indices=_SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
109
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    """Register the custom markers used across the test-suite (hook name fixed by pytest)."""
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    """Forward CLI-option registration to the shared transformers helper."""
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """Emit the optional report files when --make-reports is passed."""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    """Treat pytest's "no tests collected" exit code (5) as success."""
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        # Treat any example carrying the IGNORE_RESULT flag as passing.
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


# Install the custom checker/module/parser so doctests honour the flag above.
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
56
0
# Public entry point for the UnCLIP pipelines.
# When `torch` or a recent-enough `transformers` (>= 4.25.0) is missing, the
# real pipelines are replaced by dummy objects that raise an informative
# ImportError on use.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fall back to placeholder classes with the same names.
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
110
"""Marian model configuration and ONNX export configuration."""
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging


logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}


class MarianConfig(PretrainedConfig):
    """Configuration for MarianMT models (defaults mirror Helsinki-NLP/opus-mt-en-de)."""

    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # A separate decoder vocabulary is optional; default to the shared one.
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class MarianOnnxConfig(OnnxSeqaSeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeqaSeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
56
0
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __lowerCamelCase = logging.get_logger(__name__) class UpperCAmelCase ( _lowerCamelCase ,_lowerCamelCase ): A__ : List[str] = "maskformer-swin" A__ : Tuple = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__(self : Dict , snake_case__ : List[str]=2_24 , snake_case__ : List[str]=4 , snake_case__ : List[str]=3 , snake_case__ : Tuple=96 , snake_case__ : Any=[2, 2, 6, 2] , snake_case__ : Any=[3, 6, 12, 24] , snake_case__ : Any=7 , snake_case__ : int=4.0 , snake_case__ : str=True , snake_case__ : List[str]=0.0 , snake_case__ : int=0.0 , snake_case__ : Optional[Any]=0.1 , snake_case__ : Optional[int]="gelu" , snake_case__ : Optional[Any]=False , snake_case__ : Tuple=0.02 , snake_case__ : int=1e-5 , snake_case__ : Any=None , snake_case__ : int=None , **snake_case__ : Tuple , ) -> Optional[int]: '''simple docstring''' super().__init__(**lowercase_ ) snake_case : List[str] = image_size snake_case : List[Any] = patch_size snake_case : Optional[int] = num_channels snake_case : Dict = embed_dim snake_case : int = depths snake_case : Optional[int] = len(lowercase_ ) snake_case : Optional[int] = num_heads snake_case : Dict = window_size snake_case : Dict = mlp_ratio snake_case : str = qkv_bias snake_case : Any = hidden_dropout_prob snake_case : Any = attention_probs_dropout_prob snake_case : str = drop_path_rate snake_case : Any = hidden_act snake_case : Optional[int] = use_absolute_embeddings snake_case : Tuple = layer_norm_eps snake_case : Any = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model snake_case : str = int(embed_dim * 2 ** (len(lowercase_ ) - 1) ) snake_case : Union[str, Any] = ["stem"] + [f"""stage{idx}""" for idx in 
range(1 , len(lowercase_ ) + 1 )] snake_case , snake_case : Tuple = get_aligned_output_features_output_indices( out_features=lowercase_ , out_indices=lowercase_ , stage_names=self.stage_names )
59
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class a ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): snake_case_ = CycleDiffusionPipeline snake_case_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { "negative_prompt", "height", "width", "negative_prompt_embeds", } snake_case_ = PipelineTesterMixin.required_optional_params - {"latents"} snake_case_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} ) snake_case_ = IMAGE_TO_IMAGE_IMAGE_PARAMS snake_case_ = IMAGE_TO_IMAGE_IMAGE_PARAMS def A_ ( self : Tuple ): torch.manual_seed(0 ) snake_case_ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) snake_case_ = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , num_train_timesteps=1000 , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , ) torch.manual_seed(0 ) snake_case_ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) snake_case_ = 
CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) snake_case_ = CLIPTextModel(lowercase_ ) snake_case_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) snake_case_ = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def A_ ( self : Any , lowercase_ : int , lowercase_ : Optional[Any]=0 ): snake_case_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ ) snake_case_ = image / 2 + 0.5 if str(lowercase_ ).startswith('''mps''' ): snake_case_ = torch.manual_seed(lowercase_ ) else: snake_case_ = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) snake_case_ = { '''prompt''': '''An astronaut riding an elephant''', '''source_prompt''': '''An astronaut riding a horse''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 2, '''eta''': 0.1, '''strength''': 0.8, '''guidance_scale''': 3, '''source_guidance_scale''': 1, '''output_type''': '''numpy''', } return inputs def A_ ( self : Union[str, Any] ): snake_case_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator snake_case_ = self.get_dummy_components() snake_case_ = CycleDiffusionPipeline(**lowercase_ ) snake_case_ = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) snake_case_ = self.get_dummy_inputs(lowercase_ ) snake_case_ = pipe(**lowercase_ ) snake_case_ = output.images snake_case_ = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) snake_case_ = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' ) def A_ ( 
self : Union[str, Any] ): snake_case_ = self.get_dummy_components() for name, module in components.items(): if hasattr(lowercase_ , '''half''' ): snake_case_ = module.half() snake_case_ = CycleDiffusionPipeline(**lowercase_ ) snake_case_ = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) snake_case_ = self.get_dummy_inputs(lowercase_ ) snake_case_ = pipe(**lowercase_ ) snake_case_ = output.images snake_case_ = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) snake_case_ = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def A_ ( self : Optional[int] ): return super().test_save_load_local() @unittest.skip('''non-deterministic pipeline''' ) def A_ ( self : List[Any] ): return super().test_inference_batch_single_identical() @skip_mps def A_ ( self : Union[str, Any] ): return super().test_dict_tuple_outputs_equivalent() @skip_mps def A_ ( self : Union[str, Any] ): return super().test_save_load_optional_components() @skip_mps def A_ ( self : Union[str, Any] ): return super().test_attention_slicing_forward_pass() @slow @require_torch_gpu class a ( unittest.TestCase ): def A_ ( self : List[Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def A_ ( self : Union[str, Any] ): snake_case_ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/cycle-diffusion/black_colored_car.png''' ) snake_case_ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' ) snake_case_ = init_image.resize((512, 512) ) snake_case_ = '''CompVis/stable-diffusion-v1-4''' snake_case_ = DDIMScheduler.from_pretrained(lowercase_ , subfolder='''scheduler''' ) snake_case_ = CycleDiffusionPipeline.from_pretrained( lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ , 
torch_dtype=torch.floataa , revision='''fp16''' ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) pipe.enable_attention_slicing() snake_case_ = '''A black colored car''' snake_case_ = '''A blue colored car''' snake_case_ = torch.manual_seed(0 ) snake_case_ = pipe( prompt=lowercase_ , source_prompt=lowercase_ , image=lowercase_ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase_ , output_type='''np''' , ) snake_case_ = output.images # the values aren't exactly equal, but the images look the same visually assert np.abs(image - expected_image ).max() < 5e-1 def A_ ( self : List[str] ): snake_case_ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/cycle-diffusion/black_colored_car.png''' ) snake_case_ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' ) snake_case_ = init_image.resize((512, 512) ) snake_case_ = '''CompVis/stable-diffusion-v1-4''' snake_case_ = DDIMScheduler.from_pretrained(lowercase_ , subfolder='''scheduler''' ) snake_case_ = CycleDiffusionPipeline.from_pretrained(lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) pipe.enable_attention_slicing() snake_case_ = '''A black colored car''' snake_case_ = '''A blue colored car''' snake_case_ = torch.manual_seed(0 ) snake_case_ = pipe( prompt=lowercase_ , source_prompt=lowercase_ , image=lowercase_ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase_ , output_type='''np''' , ) snake_case_ = output.images assert np.abs(image - expected_image ).max() < 2e-2
56
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _snake_case = { 'configuration_rag': ['RagConfig'], 'retrieval_rag': ['RagRetriever'], 'tokenization_rag': ['RagTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ 'RagModel', 'RagPreTrainedModel', 'RagSequenceForGeneration', 'RagTokenForGeneration', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ 'TFRagModel', 'TFRagPreTrainedModel', 'TFRagSequenceForGeneration', 'TFRagTokenForGeneration', ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys _snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
283
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a : str = logging.get_logger(__name__) a : str = { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json', 'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json', 'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json', # See all BigBird models at https://huggingface.co/models?filter=big_bird } class a ( _lowerCamelCase ): snake_case_ = "big_bird" def __init__( self : Union[str, Any] , lowercase_ : List[Any]=5_0358 , lowercase_ : Tuple=768 , lowercase_ : Dict=12 , lowercase_ : str=12 , lowercase_ : Tuple=3072 , lowercase_ : Any="gelu_new" , lowercase_ : Optional[Any]=0.1 , lowercase_ : List[Any]=0.1 , lowercase_ : List[Any]=4096 , lowercase_ : List[Any]=2 , lowercase_ : List[str]=0.02 , lowercase_ : Optional[int]=1e-12 , lowercase_ : Tuple=True , lowercase_ : Tuple=0 , lowercase_ : str=1 , lowercase_ : Union[str, Any]=2 , lowercase_ : Optional[Any]=66 , lowercase_ : Optional[int]="block_sparse" , lowercase_ : Any=True , lowercase_ : List[str]=False , lowercase_ : Any=64 , lowercase_ : Tuple=3 , lowercase_ : Tuple=None , **lowercase_ : Tuple , ): super().__init__( pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , sep_token_id=lowercase_ , **lowercase_ , ) snake_case_ = vocab_size snake_case_ = max_position_embeddings snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = initializer_range snake_case_ = type_vocab_size snake_case_ = layer_norm_eps snake_case_ = use_cache snake_case_ = 
rescale_embeddings snake_case_ = attention_type snake_case_ = use_bias snake_case_ = block_size snake_case_ = num_random_blocks snake_case_ = classifier_dropout class a ( _lowerCamelCase ): @property def A_ ( self : str ): if self.task == "multiple-choice": snake_case_ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: snake_case_ = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
56
0
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json', 'umberto-commoncrawl-cased-v1': ( 'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json' ), 'umberto-wikipedia-uncased-v1': ( 'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json' ), } class snake_case_ ( _lowerCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = "camembert" def __init__( self : List[Any] , _UpperCamelCase : Dict=3_0_5_2_2 , _UpperCamelCase : Dict=7_6_8 , _UpperCamelCase : str=1_2 , _UpperCamelCase : Optional[Any]=1_2 , _UpperCamelCase : Dict=3_0_7_2 , _UpperCamelCase : Union[str, Any]="gelu" , _UpperCamelCase : int=0.1 , _UpperCamelCase : str=0.1 , _UpperCamelCase : Union[str, Any]=5_1_2 , _UpperCamelCase : List[str]=2 , _UpperCamelCase : Optional[int]=0.02 , _UpperCamelCase : Union[str, Any]=1e-12 , _UpperCamelCase : Dict=1 , _UpperCamelCase : Any=0 , _UpperCamelCase : int=2 , _UpperCamelCase : List[str]="absolute" , _UpperCamelCase : Any=True , _UpperCamelCase : int=None , **_UpperCamelCase : Union[str, Any] , ) ->str: super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ ) snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = hidden_act snake_case_ = intermediate_size snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = position_embedding_type snake_case_ = use_cache snake_case_ = classifier_dropout class snake_case_ ( _lowerCamelCase ): 
'''simple docstring''' @property def snake_case__( self : Any ) ->Tuple: if self.task == "multiple-choice": snake_case_ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: snake_case_ = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
8
'''simple docstring''' import contextlib import os import sqlitea import pytest from datasets import Dataset, Features, Value from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> str: '''simple docstring''' assert isinstance(__UpperCAmelCase, __UpperCAmelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @require_sqlalchemy @pytest.mark.parametrize('''keep_in_memory''', [False, True] ) def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> int: '''simple docstring''' snake_case_ = tmp_path / '''cache''' snake_case_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): snake_case_ = SqlDatasetReader( '''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=__UpperCAmelCase, keep_in_memory=__UpperCAmelCase ).read() _check_sql_dataset(__UpperCAmelCase, __UpperCAmelCase ) @require_sqlalchemy @pytest.mark.parametrize( '''features''', [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ], ) def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> List[str]: '''simple docstring''' snake_case_ = tmp_path / '''cache''' snake_case_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} 
snake_case_ = features.copy() if features else default_expected_features snake_case_ = ( Features({feature: Value(__UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) snake_case_ = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, features=__UpperCAmelCase, cache_dir=__UpperCAmelCase ).read() _check_sql_dataset(__UpperCAmelCase, __UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase ) -> List[str]: '''simple docstring''' with contextlib.closing(sqlitea.connect(__UpperCAmelCase ) ) as con: snake_case_ = con.cursor() cur.execute('''SELECT * FROM dataset''' ) for row in cur: yield row @require_sqlalchemy def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> List[Any]: '''simple docstring''' snake_case_ = tmp_path / '''cache''' snake_case_ = os.path.join(__UpperCAmelCase, '''tmp.sql''' ) snake_case_ = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=__UpperCAmelCase ).read() SqlDatasetWriter(__UpperCAmelCase, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=1 ).write() snake_case_ = iter_sql_file(__UpperCAmelCase ) snake_case_ = iter_sql_file(__UpperCAmelCase ) for rowa, rowa in zip(__UpperCAmelCase, __UpperCAmelCase ): assert rowa == rowa @require_sqlalchemy def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> Any: '''simple docstring''' snake_case_ = tmp_path / '''cache''' snake_case_ = os.path.join(__UpperCAmelCase, '''tmp.sql''' ) snake_case_ = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=__UpperCAmelCase ).read() SqlDatasetWriter(__UpperCAmelCase, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=2 ).write() snake_case_ = iter_sql_file(__UpperCAmelCase ) snake_case_ = iter_sql_file(__UpperCAmelCase ) for rowa, rowa in zip(__UpperCAmelCase, __UpperCAmelCase ): assert rowa == rowa @require_sqlalchemy def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> 
List[str]: '''simple docstring''' snake_case_ = tmp_path / '''cache''' snake_case_ = os.path.join(__UpperCAmelCase, '''tmp.sql''' ) snake_case_ = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=__UpperCAmelCase ).read() with pytest.raises(__UpperCAmelCase ): SqlDatasetWriter(__UpperCAmelCase, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=0 ).write()
56
0
import warnings from pathlib import Path from typing import List, Tuple, Union import fire from torch import nn from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel from transformers.utils import logging _snake_case = logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ): _A : str = nn.ModuleList([src_layers[i] for i in layers_to_copy] ) assert len(__UpperCAmelCase ) == len(__UpperCAmelCase ), f'''{len(__UpperCAmelCase )} != {len(__UpperCAmelCase )}''' dest_layers.load_state_dict(layers_to_copy.state_dict() ) _snake_case = { # maps num layers in teacher -> num_layers in student -> which teacher layers to copy. # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP 12: { 1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher 2: [0, 6], 3: [0, 6, 11], 4: [0, 4, 8, 11], 6: [0, 2, 4, 7, 9, 11], 9: [0, 1, 2, 4, 5, 7, 9, 10, 11], 12: list(range(12)), }, 16: { # maps num layers in student -> which teacher layers to copy 1: [0], 2: [0, 15], 3: [0, 8, 15], 4: [0, 5, 10, 15], 6: [0, 3, 6, 9, 12, 15], 8: [0, 2, 4, 6, 8, 10, 12, 15], 9: [0, 1, 3, 5, 7, 9, 11, 13, 15], 12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15], 16: list(range(16)), }, 6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))}, } _snake_case = { # maps num layers in student -> which teacher layers to copy. 
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]}, 12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]}, 16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]}, } def lowerCAmelCase_ ( snake_case_,snake_case_ ): try: _A : int = LAYERS_TO_COPY[n_teacher][n_student] return val except KeyError: if n_student != n_teacher: warnings.warn( f'''no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first''' f''' {n_student}''' ) return list(range(__UpperCAmelCase ) ) def lowerCAmelCase_ ( snake_case_,snake_case_ ): if n_student > n_teacher: raise ValueError(f'''Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}''' ) elif n_teacher == n_student: return list(range(__UpperCAmelCase ) ) elif n_student == 1: return [n_teacher - 1] else: return LAYERS_TO_SUPERVISE[n_teacher][n_student] def lowerCAmelCase_ ( snake_case_,snake_case_ = "student",snake_case_ = None,snake_case_ = None,snake_case_=False,snake_case_=None,snake_case_=None,**snake_case_,): _A : Dict = """encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.""" assert (e is not None) or (d is not None), _msg if isinstance(__UpperCAmelCase,__UpperCAmelCase ): AutoTokenizer.from_pretrained(__UpperCAmelCase ).save_pretrained(__UpperCAmelCase ) # purely for convenience _A : Any = AutoModelForSeqaSeqLM.from_pretrained(__UpperCAmelCase ).eval() else: assert isinstance(__UpperCAmelCase,__UpperCAmelCase ), f'''teacher must be a model or string got type {type(__UpperCAmelCase )}''' _A : Dict = teacher.config.to_diff_dict() try: _A , _A : List[Any] = teacher.config.encoder_layers, teacher.config.decoder_layers if e is None: _A : Optional[int] = teacher_e if d is None: _A : Tuple = teacher_d init_kwargs.update({"""encoder_layers""": e, """decoder_layers""": d} ) except AttributeError: # T5 if hasattr(teacher.config,"""num_encoder_layers""" ): _A , _A : Optional[Any] = 
teacher.config.num_encoder_layers, teacher.config.num_decoder_layers else: _A , _A : Union[str, Any] = teacher.config.num_layers, teacher.config.num_decoder_layers if e is None: _A : Dict = teacher_e if d is None: _A : List[Any] = teacher_d if hasattr(teacher.config,"""num_encoder_layers""" ): init_kwargs.update({"""num_encoder_layers""": e, """num_decoder_layers""": d} ) else: init_kwargs.update({"""num_layers""": e, """num_decoder_layers""": d} ) # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs init_kwargs.update(__UpperCAmelCase ) # Copy weights _A : List[Any] = teacher.config_class(**__UpperCAmelCase ) _A : Tuple = AutoModelForSeqaSeqLM.from_config(__UpperCAmelCase ) # Start by copying the full teacher state dict this will copy the first N teacher layers to the student. _A : str = student.load_state_dict(teacher.state_dict(),strict=__UpperCAmelCase ) assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys. if copy_first_teacher_layers: # Our copying is done. We just log and save _A , _A : int = list(range(__UpperCAmelCase ) ), list(range(__UpperCAmelCase ) ) logger.info( f'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to''' f''' {save_path}''' ) student.save_pretrained(__UpperCAmelCase ) return student, e_layers_to_copy, d_layers_to_copy # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer. 
if e_layers_to_copy is None: _A : Dict = pick_layers_to_copy(__UpperCAmelCase,__UpperCAmelCase ) if d_layers_to_copy is None: _A : Optional[Any] = pick_layers_to_copy(__UpperCAmelCase,__UpperCAmelCase ) try: if hasattr( __UpperCAmelCase,"""prophetnet""" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers copy_layers(teacher.prophetnet.encoder.layers,student.prophetnet.encoder.layers,__UpperCAmelCase ) copy_layers(teacher.prophetnet.decoder.layers,student.prophetnet.decoder.layers,__UpperCAmelCase ) else: copy_layers(teacher.model.encoder.layers,student.model.encoder.layers,__UpperCAmelCase ) copy_layers(teacher.model.decoder.layers,student.model.decoder.layers,__UpperCAmelCase ) except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block copy_layers(teacher.encoder.block,student.encoder.block,__UpperCAmelCase ) copy_layers(teacher.decoder.block,student.decoder.block,__UpperCAmelCase ) logger.info( f'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}''' ) _A : Optional[int] = { """teacher_type""": teacher.config.model_type, """copied_encoder_layers""": e_layers_to_copy, """copied_decoder_layers""": d_layers_to_copy, } student.save_pretrained(__UpperCAmelCase ) # Save information about copying for easier reproducibility return student, e_layers_to_copy, d_layers_to_copy if __name__ == "__main__": fire.Fire(create_student_by_copying_alternating_layers)
26
'''simple docstring''' from collections import defaultdict def __magic_name__ ( __UpperCAmelCase ) -> int: '''simple docstring''' snake_case_ = 1 snake_case_ = True for v in tree[start]: if v not in visited: ret += dfs(__UpperCAmelCase ) if ret % 2 == 0: cuts.append(__UpperCAmelCase ) return ret def __magic_name__ ( ) -> Union[str, Any]: '''simple docstring''' dfs(1 ) if __name__ == "__main__": a ,a : Dict = 10, 9 a : Dict = defaultdict(list) a : dict[int, bool] = {} a : list[int] = [] a : Tuple = 0 a : str = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)] for u, v in edges: tree[u].append(v) tree[v].append(u) even_tree() print(len(cuts) - 1)
56
0
"""simple docstring""" import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __lowerCAmelCase ( _lowerCamelCase , unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : List[Any] = KandinskyVaaControlnetPipeline __UpperCAmelCase : Optional[Any] = ['image_embeds', 'negative_image_embeds', 'hint'] __UpperCAmelCase : Dict = ['image_embeds', 'negative_image_embeds', 'hint'] __UpperCAmelCase : int = [ 'generator', 'height', 'width', 'latents', 'guidance_scale', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] __UpperCAmelCase : List[Any] = False @property def __UpperCAmelCase ( self ): return 32 @property def __UpperCAmelCase ( self ): return 32 @property def __UpperCAmelCase ( self ): return self.time_input_dim @property def __UpperCAmelCase ( self ): return self.time_input_dim * 4 @property def __UpperCAmelCase ( self ): return 100 @property def __UpperCAmelCase ( self ): torch.manual_seed(0 ) __a = { '''in_channels''': 8, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''image_hint''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, 
'''encoder_hid_dim_type''': '''image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } __a = UNetaDConditionModel(**lowercase_ ) return model @property def __UpperCAmelCase ( self ): return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def __UpperCAmelCase ( self ): torch.manual_seed(0 ) __a = VQModel(**self.dummy_movq_kwargs ) return model def __UpperCAmelCase ( self ): __a = self.dummy_unet __a = self.dummy_movq __a = DDIMScheduler( num_train_timesteps=1_000 , beta_schedule='''linear''' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=lowercase_ , ) __a = { '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def __UpperCAmelCase ( self , _a , _a=0 ): __a = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowercase_ ) ).to(lowercase_ ) __a = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( lowercase_ ) # create hint __a = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_ ) ).to(lowercase_ ) if str(lowercase_ ).startswith('''mps''' ): __a = torch.manual_seed(lowercase_ ) else: __a = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) __a = { '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''hint''': hint, '''generator''': generator, '''height''': 64, '''width''': 64, '''guidance_scale''': 4.0, 
'''num_inference_steps''': 2, '''output_type''': '''np''', } return inputs def __UpperCAmelCase ( self ): __a = '''cpu''' __a = self.get_dummy_components() __a = self.pipeline_class(**lowercase_ ) __a = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) __a = pipe(**self.get_dummy_inputs(lowercase_ ) ) __a = output.images __a = pipe( **self.get_dummy_inputs(lowercase_ ) , return_dict=lowercase_ , )[0] __a = image[0, -3:, -3:, -1] __a = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __a = np.array( [0.695_9826, 0.86_8279, 0.755_8092, 0.6876_9467, 0.8580_5804, 0.6597_7496, 0.4488_5302, 0.595_9111, 0.425_1595] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self ): __a = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy''' ) __a = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/hint_image_cat.png''' ) __a = torch.from_numpy(np.array(lowercase_ ) ).float() / 255.0 __a = hint.permute(2 , 0 , 1 ).unsqueeze(0 ) __a = KandinskyVaaPriorPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(lowercase_ ) __a = KandinskyVaaControlnetPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa ) __a = pipeline.to(lowercase_ ) 
pipeline.set_progress_bar_config(disable=lowercase_ ) __a = '''A robot, 4k photo''' __a = torch.Generator(device='''cuda''' ).manual_seed(0 ) __a , __a = pipe_prior( lowercase_ , generator=lowercase_ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple() __a = torch.Generator(device='''cuda''' ).manual_seed(0 ) __a = pipeline( image_embeds=lowercase_ , negative_image_embeds=lowercase_ , hint=lowercase_ , generator=lowercase_ , num_inference_steps=100 , output_type='''np''' , ) __a = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(lowercase_ , lowercase_ )
45
'''simple docstring''' import math from collections.abc import Callable def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> float: '''simple docstring''' snake_case_ = xa snake_case_ = xa while True: if x_n == x_na or function(__UpperCAmelCase ) == function(__UpperCAmelCase ): raise ZeroDivisionError('''float division by zero, could not find root''' ) snake_case_ = x_na - ( function(__UpperCAmelCase ) / ((function(__UpperCAmelCase ) - function(__UpperCAmelCase )) / (x_na - x_n)) ) if abs(x_na - x_na ) < 10**-5: return x_na snake_case_ = x_na snake_case_ = x_na def __magic_name__ ( __UpperCAmelCase ) -> float: '''simple docstring''' return math.pow(__UpperCAmelCase, 3 ) - (2 * x) - 5 if __name__ == "__main__": print(intersection(f, 3, 3.5))
56
0
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a =logging.get_logger(__name__) a ={ 'facebook/data2vec-vision-base-ft': ( 'https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json' ), } class A_ ( _lowerCamelCase ): _UpperCAmelCase : str = '''data2vec-vision''' def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : str=7_6_8 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2 ,SCREAMING_SNAKE_CASE__ : int=1_2 ,SCREAMING_SNAKE_CASE__ : str=3_0_7_2 ,SCREAMING_SNAKE_CASE__ : Dict="gelu" ,SCREAMING_SNAKE_CASE__ : Any=0.0 ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.0 ,SCREAMING_SNAKE_CASE__ : int=0.02 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=1E-12 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=2_2_4 ,SCREAMING_SNAKE_CASE__ : List[Any]=1_6 ,SCREAMING_SNAKE_CASE__ : List[Any]=3 ,SCREAMING_SNAKE_CASE__ : Tuple=False ,SCREAMING_SNAKE_CASE__ : Dict=False ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=False ,SCREAMING_SNAKE_CASE__ : List[Any]=False ,SCREAMING_SNAKE_CASE__ : Dict=0.1 ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 ,SCREAMING_SNAKE_CASE__ : Optional[int]=True ,SCREAMING_SNAKE_CASE__ : Optional[int]=[3, 5, 7, 1_1] ,SCREAMING_SNAKE_CASE__ : Tuple=[1, 2, 3, 6] ,SCREAMING_SNAKE_CASE__ : List[Any]=True ,SCREAMING_SNAKE_CASE__ : Tuple=0.4 ,SCREAMING_SNAKE_CASE__ : Tuple=2_5_6 ,SCREAMING_SNAKE_CASE__ : Optional[int]=1 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=False ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=2_5_5 ,**SCREAMING_SNAKE_CASE__ : str ,): super().__init__(**lowercase_) __lowerCamelCase : Tuple = hidden_size __lowerCamelCase : int = num_hidden_layers __lowerCamelCase : List[Any] = num_attention_heads __lowerCamelCase : str = intermediate_size __lowerCamelCase : Optional[int] = hidden_act __lowerCamelCase : Any = hidden_dropout_prob __lowerCamelCase : Any = attention_probs_dropout_prob 
__lowerCamelCase : Union[str, Any] = initializer_range __lowerCamelCase : Optional[Any] = layer_norm_eps __lowerCamelCase : str = image_size __lowerCamelCase : Union[str, Any] = patch_size __lowerCamelCase : Tuple = num_channels __lowerCamelCase : int = use_mask_token __lowerCamelCase : Union[str, Any] = use_absolute_position_embeddings __lowerCamelCase : Any = use_relative_position_bias __lowerCamelCase : Any = use_shared_relative_position_bias __lowerCamelCase : Dict = layer_scale_init_value __lowerCamelCase : Union[str, Any] = drop_path_rate __lowerCamelCase : Optional[int] = use_mean_pooling # decode head attributes (semantic segmentation) __lowerCamelCase : Any = out_indices __lowerCamelCase : List[Any] = pool_scales # auxiliary head attributes (semantic segmentation) __lowerCamelCase : Tuple = use_auxiliary_head __lowerCamelCase : List[str] = auxiliary_loss_weight __lowerCamelCase : Any = auxiliary_channels __lowerCamelCase : int = auxiliary_num_convs __lowerCamelCase : int = auxiliary_concat_input __lowerCamelCase : str = semantic_loss_ignore_index class A_ ( _lowerCamelCase ): _UpperCAmelCase : Dict = version.parse('''1.11''' ) @property def lowerCAmelCase ( self : Any): return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ]) @property def lowerCAmelCase ( self : str): return 1E-4
73
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() a : Any = logging.get_logger(__name__) def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' snake_case_ = DPTConfig() if "large" in checkpoint_url: snake_case_ = 1024 snake_case_ = 4096 snake_case_ = 24 snake_case_ = 16 snake_case_ = [5, 11, 17, 23] snake_case_ = [256, 512, 1024, 1024] snake_case_ = (1, 384, 384) if "ade" in checkpoint_url: snake_case_ = True snake_case_ = 150 snake_case_ = '''huggingface/label-files''' snake_case_ = '''ade20k-id2label.json''' snake_case_ = json.load(open(cached_download(hf_hub_url(__UpperCAmelCase, __UpperCAmelCase, repo_type='''dataset''' ) ), '''r''' ) ) snake_case_ = {int(__UpperCAmelCase ): v for k, v in idalabel.items()} snake_case_ = idalabel snake_case_ = {v: k for k, v in idalabel.items()} snake_case_ = [1, 150, 480, 480] return config, expected_shape def __magic_name__ ( __UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' snake_case_ = ['''pretrained.model.head.weight''', '''pretrained.model.head.bias'''] for k in ignore_keys: state_dict.pop(__UpperCAmelCase, __UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase ) -> List[Any]: '''simple docstring''' if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): snake_case_ = name.replace('''pretrained.model''', '''dpt.encoder''' ) if "pretrained.model" in name: snake_case_ = name.replace('''pretrained.model''', '''dpt.embeddings''' ) if "patch_embed" in name: snake_case_ = name.replace('''patch_embed''', '''patch_embeddings''' ) if "pos_embed" in name: snake_case_ = name.replace('''pos_embed''', '''position_embeddings''' ) 
if "attn.proj" in name: snake_case_ = name.replace('''attn.proj''', '''attention.output.dense''' ) if "proj" in name and "project" not in name: snake_case_ = name.replace('''proj''', '''projection''' ) if "blocks" in name: snake_case_ = name.replace('''blocks''', '''layer''' ) if "mlp.fc1" in name: snake_case_ = name.replace('''mlp.fc1''', '''intermediate.dense''' ) if "mlp.fc2" in name: snake_case_ = name.replace('''mlp.fc2''', '''output.dense''' ) if "norm1" in name: snake_case_ = name.replace('''norm1''', '''layernorm_before''' ) if "norm2" in name: snake_case_ = name.replace('''norm2''', '''layernorm_after''' ) if "scratch.output_conv" in name: snake_case_ = name.replace('''scratch.output_conv''', '''head''' ) if "scratch" in name: snake_case_ = name.replace('''scratch''', '''neck''' ) if "layer1_rn" in name: snake_case_ = name.replace('''layer1_rn''', '''convs.0''' ) if "layer2_rn" in name: snake_case_ = name.replace('''layer2_rn''', '''convs.1''' ) if "layer3_rn" in name: snake_case_ = name.replace('''layer3_rn''', '''convs.2''' ) if "layer4_rn" in name: snake_case_ = name.replace('''layer4_rn''', '''convs.3''' ) if "refinenet" in name: snake_case_ = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 snake_case_ = name.replace(F"refinenet{layer_idx}", F"fusion_stage.layers.{abs(layer_idx-4 )}" ) if "out_conv" in name: snake_case_ = name.replace('''out_conv''', '''projection''' ) if "resConfUnit1" in name: snake_case_ = name.replace('''resConfUnit1''', '''residual_layer1''' ) if "resConfUnit2" in name: snake_case_ = name.replace('''resConfUnit2''', '''residual_layer2''' ) if "conv1" in name: snake_case_ = name.replace('''conv1''', '''convolution1''' ) if "conv2" in name: snake_case_ = name.replace('''conv2''', '''convolution2''' ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: snake_case_ = name.replace('''pretrained.act_postprocess1.0.project.0''', 
'''neck.reassemble_stage.readout_projects.0.0''' ) if "pretrained.act_postprocess2.0.project.0" in name: snake_case_ = name.replace('''pretrained.act_postprocess2.0.project.0''', '''neck.reassemble_stage.readout_projects.1.0''' ) if "pretrained.act_postprocess3.0.project.0" in name: snake_case_ = name.replace('''pretrained.act_postprocess3.0.project.0''', '''neck.reassemble_stage.readout_projects.2.0''' ) if "pretrained.act_postprocess4.0.project.0" in name: snake_case_ = name.replace('''pretrained.act_postprocess4.0.project.0''', '''neck.reassemble_stage.readout_projects.3.0''' ) # resize blocks if "pretrained.act_postprocess1.3" in name: snake_case_ = name.replace('''pretrained.act_postprocess1.3''', '''neck.reassemble_stage.layers.0.projection''' ) if "pretrained.act_postprocess1.4" in name: snake_case_ = name.replace('''pretrained.act_postprocess1.4''', '''neck.reassemble_stage.layers.0.resize''' ) if "pretrained.act_postprocess2.3" in name: snake_case_ = name.replace('''pretrained.act_postprocess2.3''', '''neck.reassemble_stage.layers.1.projection''' ) if "pretrained.act_postprocess2.4" in name: snake_case_ = name.replace('''pretrained.act_postprocess2.4''', '''neck.reassemble_stage.layers.1.resize''' ) if "pretrained.act_postprocess3.3" in name: snake_case_ = name.replace('''pretrained.act_postprocess3.3''', '''neck.reassemble_stage.layers.2.projection''' ) if "pretrained.act_postprocess4.3" in name: snake_case_ = name.replace('''pretrained.act_postprocess4.3''', '''neck.reassemble_stage.layers.3.projection''' ) if "pretrained.act_postprocess4.4" in name: snake_case_ = name.replace('''pretrained.act_postprocess4.4''', '''neck.reassemble_stage.layers.3.resize''' ) if "pretrained" in name: snake_case_ = name.replace('''pretrained''', '''dpt''' ) if "bn" in name: snake_case_ = name.replace('''bn''', '''batch_norm''' ) if "head" in name: snake_case_ = name.replace('''head''', '''head.head''' ) if "encoder.norm" in name: snake_case_ = 
name.replace('''encoder.norm''', '''layernorm''' ) if "auxlayer" in name: snake_case_ = name.replace('''auxlayer''', '''auxiliary_head.head''' ) return name def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Dict: '''simple docstring''' for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) snake_case_ = state_dict.pop(F"dpt.encoder.layer.{i}.attn.qkv.weight" ) snake_case_ = state_dict.pop(F"dpt.encoder.layer.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict snake_case_ = in_proj_weight[: config.hidden_size, :] snake_case_ = in_proj_bias[: config.hidden_size] snake_case_ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] snake_case_ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] snake_case_ = in_proj_weight[ -config.hidden_size :, : ] snake_case_ = in_proj_bias[-config.hidden_size :] def __magic_name__ ( ) -> Any: '''simple docstring''' snake_case_ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' snake_case_ = Image.open(requests.get(__UpperCAmelCase, stream=__UpperCAmelCase ).raw ) return im @torch.no_grad() def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' snake_case_ ,snake_case_ = get_dpt_config(__UpperCAmelCase ) # load original state_dict from URL snake_case_ = torch.hub.load_state_dict_from_url(__UpperCAmelCase, map_location='''cpu''' ) # remove certain keys remove_ignore_keys_(__UpperCAmelCase ) # rename keys for key in state_dict.copy().keys(): snake_case_ = state_dict.pop(__UpperCAmelCase ) snake_case_ = val # read in qkv matrices read_in_q_k_v(__UpperCAmelCase, __UpperCAmelCase ) # load HuggingFace model snake_case_ = DPTForSemanticSegmentation(__UpperCAmelCase ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(__UpperCAmelCase ) model.load_state_dict(__UpperCAmelCase ) model.eval() # 
Check outputs on an image snake_case_ = 480 if '''ade''' in checkpoint_url else 384 snake_case_ = DPTImageProcessor(size=__UpperCAmelCase ) snake_case_ = prepare_img() snake_case_ = image_processor(__UpperCAmelCase, return_tensors='''pt''' ) # forward pass snake_case_ = model(**__UpperCAmelCase ).logits if '''ade''' in checkpoint_url else model(**__UpperCAmelCase ).predicted_depth # Assert logits snake_case_ = torch.tensor([[6.3_1_9_9, 6.3_6_2_9, 6.4_1_4_8], [6.3_8_5_0, 6.3_6_1_5, 6.4_1_6_6], [6.3_5_1_9, 6.3_1_7_6, 6.3_5_7_5]] ) if "ade" in checkpoint_url: snake_case_ = torch.tensor([[4.0_4_8_0, 4.2_4_2_0, 4.4_3_6_0], [4.3_1_2_4, 4.5_6_9_3, 4.8_2_6_1], [4.5_7_6_8, 4.8_9_6_5, 5.2_1_6_3]] ) assert outputs.shape == torch.Size(__UpperCAmelCase ) assert ( torch.allclose(outputs[0, 0, :3, :3], __UpperCAmelCase, atol=1e-4 ) if "ade" in checkpoint_url else torch.allclose(outputs[0, :3, :3], __UpperCAmelCase ) ) Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase ) print(F"Saving model to {pytorch_dump_folder_path}" ) model.save_pretrained(__UpperCAmelCase ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(__UpperCAmelCase ) if push_to_hub: print('''Pushing model to hub...''' ) model.push_to_hub( repo_path_or_name=Path(__UpperCAmelCase, __UpperCAmelCase ), organization='''nielsr''', commit_message='''Add model''', use_temp_dir=__UpperCAmelCase, ) image_processor.push_to_hub( repo_path_or_name=Path(__UpperCAmelCase, __UpperCAmelCase ), organization='''nielsr''', commit_message='''Add image processor''', use_temp_dir=__UpperCAmelCase, ) if __name__ == "__main__": a : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt', type=str, help='URL of the original DPT checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, 
help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', action='store_true', ) parser.add_argument( '--model_name', default='dpt-large', type=str, help='Name of the model, in case you\'re pushing to the hub.', ) a : List[Any] = parser.parse_args() convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
56
0
import collections import os import re from pathlib import Path lowercase__ : str = 'src/transformers' # Matches is_xxx_available() lowercase__ : Tuple = re.compile(R"is\_([a-z_]*)_available()") # Catches a one-line _import_struct = {xxx} lowercase__ : List[str] = re.compile(R"^_import_structure\s+=\s+\{([^\}]+)\}") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] lowercase__ : List[str] = re.compile(R"\s+\"\S*\":\s+\[([^\]]*)\]") # Catches a line if not is_foo_available lowercase__ : Optional[int] = re.compile(R"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)") # Catches a line _import_struct["bla"].append("foo") lowercase__ : str = re.compile(R"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] lowercase__ : Tuple = re.compile(R"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]") # Catches a line with an object between quotes and a comma: "MyModel", lowercase__ : int = re.compile(R"^\s+\"([^\"]+)\",") # Catches a line with objects between brackets only: ["foo", "bar"], lowercase__ : Optional[int] = re.compile(R"^\s+\[([^\]]+)\]") # Catches a line with from foo import bar, bla, boo lowercase__ : Optional[int] = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n") # Catches a line with try: lowercase__ : List[str] = re.compile(R"^\s*try:") # Catches a line with else: lowercase__ : str = re.compile(R"^\s*else:") def A_ ( snake_case : Union[str, Any] ) -> Dict: '''simple docstring''' if _re_test_backend.search(__UpperCAmelCase ) is None: return None __UpperCamelCase = [b[0] for b in _re_backend.findall(__UpperCAmelCase )] backends.sort() return "_and_".join(__UpperCAmelCase ) def A_ ( snake_case : int ) -> Optional[Any]: '''simple docstring''' with open(__UpperCAmelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __UpperCamelCase = f.readlines() __UpperCamelCase = 0 while line_index < len(__UpperCAmelCase ) and not 
lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(__UpperCAmelCase ): return None # First grab the objects without a specific backend in _import_structure __UpperCamelCase = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: __UpperCamelCase = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(__UpperCAmelCase ): __UpperCamelCase = _re_one_line_import_struct.search(__UpperCAmelCase ).groups()[0] __UpperCamelCase = re.findall(r'''\[([^\]]+)\]''' , __UpperCAmelCase ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue __UpperCamelCase = _re_import_struct_key_value.search(__UpperCAmelCase ) if single_line_import_search is not None: __UpperCamelCase = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(__UpperCAmelCase ) > 0] objects.extend(__UpperCAmelCase ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 __UpperCamelCase = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
__UpperCamelCase = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __UpperCamelCase = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __UpperCamelCase = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): __UpperCamelCase = lines[line_index] if _re_import_struct_add_one.search(__UpperCAmelCase ) is not None: objects.append(_re_import_struct_add_one.search(__UpperCAmelCase ).groups()[0] ) elif _re_import_struct_add_many.search(__UpperCAmelCase ) is not None: __UpperCamelCase = _re_import_struct_add_many.search(__UpperCAmelCase ).groups()[0].split(''', ''' ) __UpperCamelCase = [obj[1:-1] for obj in imports if len(__UpperCAmelCase ) > 0] objects.extend(__UpperCAmelCase ) elif _re_between_brackets.search(__UpperCAmelCase ) is not None: __UpperCamelCase = _re_between_brackets.search(__UpperCAmelCase ).groups()[0].split(''', ''' ) __UpperCamelCase = [obj[1:-1] for obj in imports if len(__UpperCAmelCase ) > 0] objects.extend(__UpperCAmelCase ) elif _re_quote_object.search(__UpperCAmelCase ) is not None: objects.append(_re_quote_object.search(__UpperCAmelCase ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 12 + '''"''' ): objects.append(line[13:-3] ) line_index += 1 __UpperCamelCase = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend __UpperCamelCase = [] while ( line_index < len(__UpperCAmelCase ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): __UpperCamelCase = lines[line_index] __UpperCamelCase = _re_import.search(__UpperCAmelCase ) if 
single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 __UpperCamelCase = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(__UpperCAmelCase ): # If the line is an if is_backend_available, we grab all objects associated. __UpperCamelCase = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __UpperCamelCase = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __UpperCamelCase = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): __UpperCamelCase = lines[line_index] __UpperCamelCase = _re_import.search(__UpperCAmelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 12 ): objects.append(line[12:-2] ) line_index += 1 __UpperCamelCase = objects else: line_index += 1 return import_dict_objects, type_hint_objects def A_ ( snake_case : List[str] , snake_case : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' def find_duplicates(snake_case : List[Any] ): return [k for k, v in collections.Counter(__UpperCAmelCase ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] __UpperCamelCase = [] for key in import_dict_objects.keys(): __UpperCamelCase = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}" ) __UpperCamelCase = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: 
errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}" ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): __UpperCamelCase = '''base imports''' if key == '''none''' else f"{key} backend" errors.append(f"Differences for {name}:" ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(f" {a} in TYPE_HINT but not in _import_structure." ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(f" {a} in _import_structure but not in TYPE_HINT." ) return errors def A_ ( ) -> Tuple: '''simple docstring''' __UpperCamelCase = [] for root, _, files in os.walk(__UpperCAmelCase ): if "__init__.py" in files: __UpperCamelCase = os.path.join(__UpperCAmelCase , '''__init__.py''' ) __UpperCamelCase = parse_init(__UpperCAmelCase ) if objects is not None: __UpperCamelCase = analyze_results(*__UpperCAmelCase ) if len(__UpperCAmelCase ) > 0: __UpperCamelCase = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}" failures.append('''\n'''.join(__UpperCAmelCase ) ) if len(__UpperCAmelCase ) > 0: raise ValueError('''\n\n'''.join(__UpperCAmelCase ) ) def A_ ( ) -> Optional[Any]: '''simple docstring''' __UpperCamelCase = [] for path, directories, files in os.walk(__UpperCAmelCase ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(__UpperCAmelCase ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(__UpperCAmelCase ) / folder).glob('''*.py''' ) ) ) == 0: continue __UpperCamelCase = str((Path(__UpperCAmelCase ) / folder).relative_to(__UpperCAmelCase ) ) __UpperCamelCase = short_path.replace(os.path.sep , '''.''' ) submodules.append(__UpperCAmelCase ) for fname in files: if fname == "__init__.py": continue __UpperCamelCase = str((Path(__UpperCAmelCase ) / fname).relative_to(__UpperCAmelCase ) ) __UpperCamelCase = short_path.replace('''.py''' , '''''' 
).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(__UpperCAmelCase ) return submodules lowercase__ : Dict = [ 'convert_pytorch_checkpoint_to_tf2', 'modeling_flax_pytorch_utils', 'models.esm.openfold_utils', ] def A_ ( ) -> Optional[int]: '''simple docstring''' from transformers.utils import direct_transformers_import __UpperCamelCase = direct_transformers_import(__UpperCAmelCase ) __UpperCamelCase = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentiall re-) add them. with open(os.path.join(__UpperCAmelCase , '''__init__.py''' ) , '''r''' ) as f: __UpperCamelCase = f.read() import_structure_keys.update(set(re.findall(r'''import_structure\[\"([^\"]*)\"\]''' , __UpperCAmelCase ) ) ) __UpperCamelCase = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(__UpperCAmelCase ) > 0: __UpperCamelCase = '''\n'''.join(f"- {module}" for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registed in the main init of Transformers:\n''' f"{list_of_modules}\n" '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
328
'''simple docstring''' import re def __magic_name__ ( __UpperCAmelCase ) -> bool: '''simple docstring''' snake_case_ = re.compile( r'''^(?:0|94|\+94|0{2}94)''' r'''7(0|1|2|4|5|6|7|8)''' r'''(-| |)''' r'''\d{7}$''' ) return bool(re.search(__UpperCAmelCase, __UpperCAmelCase ) ) if __name__ == "__main__": a : Any = '0094702343221' print(is_sri_lankan_phone_number(phone))
56
0
"""simple docstring""" from ..utils import DummyObject, requires_backends class _A ( metaclass=_lowerCamelCase ): snake_case__ : Optional[Any] = ['torch', 'transformers', 'onnx'] def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" requires_backends(self , ["""torch""", """transformers""", """onnx"""] ) @classmethod def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) @classmethod def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) class _A ( metaclass=_lowerCamelCase ): snake_case__ : Optional[int] = ['torch', 'transformers', 'onnx'] def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" requires_backends(self , ["""torch""", """transformers""", """onnx"""] ) @classmethod def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) @classmethod def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) class _A ( metaclass=_lowerCamelCase ): snake_case__ : int = ['torch', 'transformers', 'onnx'] def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" requires_backends(self , ["""torch""", """transformers""", """onnx"""] ) @classmethod def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) @classmethod def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) class _A ( metaclass=_lowerCamelCase ): snake_case__ : Optional[Any] = ['torch', 'transformers', 'onnx'] def __init__( self , *__lowerCAmelCase , 
**__lowerCAmelCase ): """simple docstring""" requires_backends(self , ["""torch""", """transformers""", """onnx"""] ) @classmethod def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) @classmethod def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) class _A ( metaclass=_lowerCamelCase ): snake_case__ : Any = ['torch', 'transformers', 'onnx'] def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" requires_backends(self , ["""torch""", """transformers""", """onnx"""] ) @classmethod def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) @classmethod def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) class _A ( metaclass=_lowerCamelCase ): snake_case__ : Dict = ['torch', 'transformers', 'onnx'] def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" requires_backends(self , ["""torch""", """transformers""", """onnx"""] ) @classmethod def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) @classmethod def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
197
'''simple docstring''' import re from filelock import FileLock try: import nltk a : Union[str, Any] = True except (ImportError, ModuleNotFoundError): a : Any = False if NLTK_AVAILABLE: with FileLock('.lock') as lock: nltk.download('punkt', quiet=True) def __magic_name__ ( __UpperCAmelCase ) -> str: '''simple docstring''' re.sub('''<n>''', '''''', __UpperCAmelCase ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(__UpperCAmelCase ) )
56
0
"""simple docstring""" from __future__ import annotations def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ): # noqa: E741 while r - l > 1: UpperCAmelCase = (l + r) // 2 if v[m] >= key: UpperCAmelCase = m else: UpperCAmelCase = m # noqa: E741 return r def _lowerCAmelCase ( lowercase_ ): if len(__UpperCAmelCase ) == 0: return 0 UpperCAmelCase = [0] * len(__UpperCAmelCase ) UpperCAmelCase = 1 UpperCAmelCase = v[0] for i in range(1 , len(__UpperCAmelCase ) ): if v[i] < tail[0]: UpperCAmelCase = v[i] elif v[i] > tail[length - 1]: UpperCAmelCase = v[i] length += 1 else: UpperCAmelCase = v[i] return length if __name__ == "__main__": import doctest doctest.testmod()
78
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) a : Tuple = { 'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Optional[Any] = ['LlamaTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : str = ['LlamaTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Optional[Any] = [ 'LlamaForCausalLM', 'LlamaModel', 'LlamaPreTrainedModel', 'LlamaForSequenceClassification', ] if TYPE_CHECKING: from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama import LlamaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama_fast import LlamaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel else: import sys a : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
56
0
"""simple docstring""" from __future__ import annotations class A_ : """simple docstring""" def __init__( self :int , lowerCamelCase_ :Tuple=None ): """simple docstring""" lowerCamelCase__ : Any =data lowerCamelCase__ : str =None def __repr__( self :int ): """simple docstring""" lowerCamelCase__ : Dict =[] lowerCamelCase__ : Union[str, Any] =self while temp: string_rep.append(f"""{temp.data}""" ) lowerCamelCase__ : Tuple =temp.next return "->".join(lowercase_ ) def lowerCAmelCase_ ( snake_case_ : Tuple ) ->str: if not elements_list: raise Exception('The Elements List is empty' ) lowerCamelCase__ : int =Node(elements_list[0] ) for i in range(1 , len(__UpperCAmelCase ) ): lowerCamelCase__ : Union[str, Any] =Node(elements_list[i] ) lowerCamelCase__ : Any =current.next return head def lowerCAmelCase_ ( snake_case_ : str ) ->None: if head_node is not None and isinstance(__UpperCAmelCase , __UpperCAmelCase ): print_reverse(head_node.next ) print(head_node.data ) def lowerCAmelCase_ ( ) ->Union[str, Any]: from doctest import testmod testmod() lowerCamelCase__ : Tuple =make_linked_list([1_4, 5_2, 1_4, 1_2, 4_3] ) print('Linked List:' ) print(__UpperCAmelCase ) print('Elements in Reverse:' ) print_reverse(__UpperCAmelCase ) if __name__ == "__main__": main()
126
"""TensorFlow optimization utilities: warmup schedule, AdamW and gradient accumulation."""
import re
from typing import Callable, List, Optional, Union

import tensorflow as tf

try:
    # Keras 2.11+ moved the classic Adam under `legacy`.
    from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
    from tensorflow.keras.optimizers import Adam


class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applies linear warmup, then hands off to a wrapped decay schedule.

    Args:
        initial_learning_rate: learning rate reached at the end of warmup.
        decay_schedule_fn: schedule applied after ``warmup_steps``.
        warmup_steps: number of warmup steps.
        power: exponent of the polynomial warmup (1.0 = linear).
        name: optional name scope for the ops.
    """

    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup: while global_step < warmup_steps the
            # learning rate is `(global_step / warmup_steps) ** power * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                # The decay schedule counts steps from the end of warmup.
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }


def create_optimizer(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: Optional[float] = None,
    adam_global_clipnorm: Optional[float] = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: Optional[List[str]] = None,
):
    """Create an optimizer with a warmup + polynomial-decay learning-rate schedule.

    Returns:
        ``(optimizer, lr_schedule)`` — Adam (or AdamWeightDecay when
        ``weight_decay_rate > 0``) and the schedule it uses.
    """
    # Decay from init_lr down to init_lr * min_lr_ratio over the post-warmup steps.
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            # Norm/bias parameters conventionally receive no weight decay.
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule


class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay (AdamW).

    The decay is applied directly to the variables (not folded into the
    gradients), matching "Decoupled Weight Decay Regularization".
    ``include_in_weight_decay`` / ``exclude_from_weight_decay`` are lists of
    regex patterns matched against variable names; include wins over exclude.
    """

    def __init__(
        self,
        learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001,
        beta_1: float = 0.9,
        beta_2: float = 0.999,
        epsilon: float = 1e-7,
        amsgrad: bool = False,
        weight_decay_rate: float = 0.0,
        include_in_weight_decay: Optional[List[str]] = None,
        exclude_from_weight_decay: Optional[List[str]] = None,
        name: str = "AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Recreate the optimizer from a config, resolving the WarmUp schedule."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        # Cache the decay rate as a constant for this (device, dtype) slot.
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            # Decoupled decay: var -= lr * decay_rate * var
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieve the learning rate plus the kwargs to forward to the base apply."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        # Run the weight decay before the Adam update.
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether weight decay applies to the variable named ``param_name``."""
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True


class GradientAccumulator:
    """Accumulates gradients across steps on the current replica.

    Call the instance with a list of gradients to add them; read ``gradients``
    to get the running sums and ``reset()`` to zero them.
    """

    def __init__(self):
        self._gradients = []
        self._accum_steps = None  # lazily-created step counter variable

    @property
    def step(self):
        """Number of accumulation steps performed so far."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Add ``gradients`` (aligned with previous calls) into the accumulators."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            # Mirror the incoming gradient structure with zero-initialized variables.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset(self):
        """Zero the accumulated gradients and the step counter."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
56
0
"""Mosaic data augmentation: stitch four images (and their YOLO boxes) into one."""
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2
import numpy as np

# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100  # drop post-mosaic boxes smaller than this fraction
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250


def main() -> None:
    """Generate ``NUMBER_IMAGES`` mosaic images and matching YOLO label files."""
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            # Convert corner coordinates back to YOLO's center/size format.
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Read YOLO label files; return parallel lists of image paths and box lists.

    Box format: ``[class_id, xmin, ymin, xmax, ymax]`` in relative coordinates.
    Images whose label file contains no boxes are skipped.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            # YOLO stores center x/y and width/height; convert to corners.
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
) -> tuple[list, list, str]:
    """Compose four images into one mosaic and remap their boxes.

    Returns the mosaic image, its remapped annotations, and the path of the
    first source image (used to name the output file).
    """
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    # Random split point of the canvas into four quadrants.
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]


def random_chars(number_char: int) -> str:
    """Return a random string of ``number_char`` lowercase letters and digits."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
201
"""Unit and slow integration tests for the AutoencoderKL (VAE) model."""
import gc
import unittest

import torch
from parameterized import parameterized

from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin

enable_full_determinism()


class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))

    def test_from_pretrained_hosted(self):
        model, loading_info = AutoencoderKL.from_pretrained(
            "fusing/autoencoder-kl-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))


@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()
        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)

    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)

    @parameterized.expand([(13,), (16,), (27,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)

    @parameterized.expand([(13,), (16,), (37,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        # Sampling through the Gaussian posterior is slightly looser on MPS.
        tolerance = 3e-3 if torch_device != "mps" else 1e-2

        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
56
0
"""Aggregate nightly pytest JSON logs and post a failure report to Slack.

Reads every ``*.log`` file in the current directory (one JSON record per
line, as produced by ``pytest --report-log``), tallies failed tests per log,
and — when ``TEST_TYPE`` is set — posts a summary message plus one thread
reply per failing file to the #accelerate-ci-daily channel.

Fix: the previous revision assigned every value to a single mangled name
(``__lowerCamelCase``) while reading the real names (``failed``, ``payload``,
``message``, ``ts``, ...), so the script raised NameError immediately.
"""
import json
import os
from datetime import date
from pathlib import Path

from tabulate import DataRow, TableFormat, tabulate

# Minimal table style that renders cleanly inside a Slack code block.
hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)

failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}

payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]

# Walk every pytest report log, collecting [nodeid, duration, runner] triples
# for each failed test; logs are deleted once consumed.
total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f'{line["duration"]:.4f}'
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

# Build the top-level Slack message: one bold line per log with failures,
# each followed by a per-file failure-count table.
message = ""
all_files2failed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            files_failed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in files_failed:
                    files_failed[data[0]] = [data[1:]]
                else:
                    files_failed[data[0]] += [data[1:]]
                failed_table.append(data)

            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(files_failed[file])])
            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_files2failed.append(files_failed)
    # Slack section blocks cap out around 3000 characters — truncate and
    # point readers at the full Action results instead.
    if len(message) > 30_00:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 30_00 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)

# Only post to Slack when running inside CI (TEST_TYPE is set).
if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
            },
        }
        payload.append(action_button)
    date_report = {
        "type": "context",
        "elements": [
            {
                "type": "plain_text",
                "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
            }
        ],
    }
    payload.append(date_report)
    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]
    # Post one threaded reply per failing source file, listing its failures.
    for failed_file in all_files2failed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ""
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ""
            payload = {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f'Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```',
                },
            }
            client.chat_postMessage(
                channel="#accelerate-ci-daily",
                thread_ts=ts,
                blocks=[payload],
            )
59
'''simple docstring''' from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxCrossAttnUpBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, FlaxUpBlockaD, ) @flax.struct.dataclass class a ( _lowerCamelCase ): snake_case_ = 42 @flax_register_to_config class a ( nn.Module , _lowerCamelCase , _lowerCamelCase ): snake_case_ = 32 snake_case_ = 4 snake_case_ = 4 snake_case_ = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) snake_case_ = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D") snake_case_ = False snake_case_ = (320, 640, 1_280, 1_280) snake_case_ = 2 snake_case_ = 8 snake_case_ = None snake_case_ = 1_280 snake_case_ = 0.0 snake_case_ = False snake_case_ = jnp.floataa snake_case_ = True snake_case_ = 0 snake_case_ = False def A_ ( self : Optional[int] , lowercase_ : jax.random.KeyArray ): # init input tensors snake_case_ = (1, self.in_channels, self.sample_size, self.sample_size) snake_case_ = jnp.zeros(lowercase_ , dtype=jnp.floataa ) snake_case_ = jnp.ones((1,) , dtype=jnp.intaa ) snake_case_ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) snake_case_ ,snake_case_ = jax.random.split(lowercase_ ) snake_case_ = {'''params''': params_rng, '''dropout''': dropout_rng} return self.init(lowercase_ , lowercase_ , lowercase_ , lowercase_ )["params"] def A_ ( self : List[str] ): snake_case_ = self.block_out_channels snake_case_ = block_out_channels[0] * 4 if self.num_attention_heads is not None: raise ValueError( '''At the moment it is not possible to define the number of attention heads via 
`num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. snake_case_ = self.num_attention_heads or self.attention_head_dim # input snake_case_ = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time snake_case_ = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) snake_case_ = FlaxTimestepEmbedding(lowercase_ , dtype=self.dtype ) snake_case_ = self.only_cross_attention if isinstance(lowercase_ , lowercase_ ): snake_case_ = (only_cross_attention,) * len(self.down_block_types ) if isinstance(lowercase_ , lowercase_ ): snake_case_ = (num_attention_heads,) * len(self.down_block_types ) # down snake_case_ = [] snake_case_ = block_out_channels[0] for i, down_block_type in enumerate(self.down_block_types ): snake_case_ = output_channel snake_case_ = block_out_channels[i] snake_case_ = i == len(lowercase_ ) - 1 if down_block_type == "CrossAttnDownBlock2D": snake_case_ = FlaxCrossAttnDownBlockaD( in_channels=lowercase_ , out_channels=lowercase_ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , 
use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: snake_case_ = FlaxDownBlockaD( in_channels=lowercase_ , out_channels=lowercase_ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(lowercase_ ) snake_case_ = down_blocks # mid snake_case_ = FlaxUNetMidBlockaDCrossAttn( in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) # up snake_case_ = [] snake_case_ = list(reversed(lowercase_ ) ) snake_case_ = list(reversed(lowercase_ ) ) snake_case_ = list(reversed(lowercase_ ) ) snake_case_ = reversed_block_out_channels[0] for i, up_block_type in enumerate(self.up_block_types ): snake_case_ = output_channel snake_case_ = reversed_block_out_channels[i] snake_case_ = reversed_block_out_channels[min(i + 1 , len(lowercase_ ) - 1 )] snake_case_ = i == len(lowercase_ ) - 1 if up_block_type == "CrossAttnUpBlock2D": snake_case_ = FlaxCrossAttnUpBlockaD( in_channels=lowercase_ , out_channels=lowercase_ , prev_output_channel=lowercase_ , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: snake_case_ = FlaxUpBlockaD( in_channels=lowercase_ , out_channels=lowercase_ , prev_output_channel=lowercase_ , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , ) up_blocks.append(lowercase_ ) snake_case_ = output_channel snake_case_ 
= up_blocks # out snake_case_ = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) snake_case_ = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : Any , lowercase_ : int=None , lowercase_ : Any=None , lowercase_ : bool = True , lowercase_ : bool = False , ): # 1. time if not isinstance(lowercase_ , jnp.ndarray ): snake_case_ = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(lowercase_ , jnp.ndarray ) and len(timesteps.shape ) == 0: snake_case_ = timesteps.astype(dtype=jnp.floataa ) snake_case_ = jnp.expand_dims(lowercase_ , 0 ) snake_case_ = self.time_proj(lowercase_ ) snake_case_ = self.time_embedding(lowercase_ ) # 2. pre-process snake_case_ = jnp.transpose(lowercase_ , (0, 2, 3, 1) ) snake_case_ = self.conv_in(lowercase_ ) # 3. down snake_case_ = (sample,) for down_block in self.down_blocks: if isinstance(lowercase_ , lowercase_ ): snake_case_ ,snake_case_ = down_block(lowercase_ , lowercase_ , lowercase_ , deterministic=not train ) else: snake_case_ ,snake_case_ = down_block(lowercase_ , lowercase_ , deterministic=not train ) down_block_res_samples += res_samples if down_block_additional_residuals is not None: snake_case_ = () for down_block_res_sample, down_block_additional_residual in zip( lowercase_ , lowercase_ ): down_block_res_sample += down_block_additional_residual new_down_block_res_samples += (down_block_res_sample,) snake_case_ = new_down_block_res_samples # 4. mid snake_case_ = self.mid_block(lowercase_ , lowercase_ , lowercase_ , deterministic=not train ) if mid_block_additional_residual is not None: sample += mid_block_additional_residual # 5. 
up for up_block in self.up_blocks: snake_case_ = down_block_res_samples[-(self.layers_per_block + 1) :] snake_case_ = down_block_res_samples[: -(self.layers_per_block + 1)] if isinstance(lowercase_ , lowercase_ ): snake_case_ = up_block( lowercase_ , temb=lowercase_ , encoder_hidden_states=lowercase_ , res_hidden_states_tuple=lowercase_ , deterministic=not train , ) else: snake_case_ = up_block(lowercase_ , temb=lowercase_ , res_hidden_states_tuple=lowercase_ , deterministic=not train ) # 6. post-process snake_case_ = self.conv_norm_out(lowercase_ ) snake_case_ = nn.silu(lowercase_ ) snake_case_ = self.conv_out(lowercase_ ) snake_case_ = jnp.transpose(lowercase_ , (0, 3, 1, 2) ) if not return_dict: return (sample,) return FlaxUNetaDConditionOutput(sample=lowercase_ )
56
0
from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class UpperCAmelCase_ ( _lowerCamelCase ): '''simple docstring''' __A : Tuple = 42 class UpperCAmelCase_ ( nn.Module ): '''simple docstring''' def __init__( self , __A=3 , __A=3 , __A=("DownEncoderBlock2D",) , __A=(64,) , __A=2 , __A=32 , __A="silu" , __A=True , ): """simple docstring""" super().__init__() lowerCamelCase : str = layers_per_block lowerCamelCase : List[Any] = torch.nn.Convad( lowercase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , ) lowerCamelCase : int = None lowerCamelCase : List[str] = nn.ModuleList([] ) # down lowerCamelCase : str = block_out_channels[0] for i, down_block_type in enumerate(lowercase_ ): lowerCamelCase : Tuple = output_channel lowerCamelCase : Union[str, Any] = block_out_channels[i] lowerCamelCase : Optional[Any] = i == len(lowercase_ ) - 1 lowerCamelCase : Dict = get_down_block( lowercase_ , num_layers=self.layers_per_block , in_channels=lowercase_ , out_channels=lowercase_ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=lowercase_ , resnet_groups=lowercase_ , attention_head_dim=lowercase_ , temb_channels=lowercase_ , ) self.down_blocks.append(lowercase_ ) # mid lowerCamelCase : Optional[int] = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowercase_ , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=lowercase_ , temb_channels=lowercase_ , ) # out lowerCamelCase : Dict = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowercase_ , eps=1e-6 ) lowerCamelCase : List[Any] = nn.SiLU() lowerCamelCase : List[Any] = 2 * out_channels if double_z else 
out_channels lowerCamelCase : Optional[Any] = nn.Convad(block_out_channels[-1] , lowercase_ , 3 , padding=1 ) lowerCamelCase : List[str] = False def _snake_case ( self , __A ): """simple docstring""" lowerCamelCase : str = x lowerCamelCase : Union[str, Any] = self.conv_in(lowercase_ ) if self.training and self.gradient_checkpointing: def create_custom_forward(__A ): def custom_forward(*__A ): return module(*lowercase_ ) return custom_forward # down if is_torch_version(">=" , "1.11.0" ): for down_block in self.down_blocks: lowerCamelCase : int = torch.utils.checkpoint.checkpoint( create_custom_forward(lowercase_ ) , lowercase_ , use_reentrant=lowercase_ ) # middle lowerCamelCase : str = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowercase_ , use_reentrant=lowercase_ ) else: for down_block in self.down_blocks: lowerCamelCase : Dict = torch.utils.checkpoint.checkpoint(create_custom_forward(lowercase_ ) , lowercase_ ) # middle lowerCamelCase : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowercase_ ) else: # down for down_block in self.down_blocks: lowerCamelCase : Optional[int] = down_block(lowercase_ ) # middle lowerCamelCase : Any = self.mid_block(lowercase_ ) # post-process lowerCamelCase : str = self.conv_norm_out(lowercase_ ) lowerCamelCase : Optional[Any] = self.conv_act(lowercase_ ) lowerCamelCase : List[Any] = self.conv_out(lowercase_ ) return sample class UpperCAmelCase_ ( nn.Module ): '''simple docstring''' def __init__( self , __A=3 , __A=3 , __A=("UpDecoderBlock2D",) , __A=(64,) , __A=2 , __A=32 , __A="silu" , __A="group" , ): """simple docstring""" super().__init__() lowerCamelCase : Union[str, Any] = layers_per_block lowerCamelCase : str = nn.Convad( lowercase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , ) lowerCamelCase : Union[str, Any] = None lowerCamelCase : Any = nn.ModuleList([] ) lowerCamelCase : Optional[Any] = in_channels if norm_type == 
"spatial" else None # mid lowerCamelCase : List[Any] = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowercase_ , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowercase_ , temb_channels=lowercase_ , ) # up lowerCamelCase : Union[str, Any] = list(reversed(lowercase_ ) ) lowerCamelCase : Optional[Any] = reversed_block_out_channels[0] for i, up_block_type in enumerate(lowercase_ ): lowerCamelCase : Optional[Any] = output_channel lowerCamelCase : List[str] = reversed_block_out_channels[i] lowerCamelCase : Optional[int] = i == len(lowercase_ ) - 1 lowerCamelCase : Dict = get_up_block( lowercase_ , num_layers=self.layers_per_block + 1 , in_channels=lowercase_ , out_channels=lowercase_ , prev_output_channel=lowercase_ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=lowercase_ , resnet_groups=lowercase_ , attention_head_dim=lowercase_ , temb_channels=lowercase_ , resnet_time_scale_shift=lowercase_ , ) self.up_blocks.append(lowercase_ ) lowerCamelCase : List[Any] = output_channel # out if norm_type == "spatial": lowerCamelCase : Optional[int] = SpatialNorm(block_out_channels[0] , lowercase_ ) else: lowerCamelCase : Dict = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowercase_ , eps=1e-6 ) lowerCamelCase : Any = nn.SiLU() lowerCamelCase : Union[str, Any] = nn.Convad(block_out_channels[0] , lowercase_ , 3 , padding=1 ) lowerCamelCase : Optional[Any] = False def _snake_case ( self , __A , __A=None ): """simple docstring""" lowerCamelCase : Union[str, Any] = z lowerCamelCase : Optional[Any] = self.conv_in(lowercase_ ) lowerCamelCase : Optional[Any] = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(__A ): def custom_forward(*__A ): return module(*lowercase_ ) return custom_forward if is_torch_version(">=" , "1.11.0" ): 
# middle lowerCamelCase : int = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowercase_ , lowercase_ , use_reentrant=lowercase_ ) lowerCamelCase : Optional[Any] = sample.to(lowercase_ ) # up for up_block in self.up_blocks: lowerCamelCase : str = torch.utils.checkpoint.checkpoint( create_custom_forward(lowercase_ ) , lowercase_ , lowercase_ , use_reentrant=lowercase_ ) else: # middle lowerCamelCase : Optional[Any] = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowercase_ , lowercase_ ) lowerCamelCase : Optional[int] = sample.to(lowercase_ ) # up for up_block in self.up_blocks: lowerCamelCase : int = torch.utils.checkpoint.checkpoint(create_custom_forward(lowercase_ ) , lowercase_ , lowercase_ ) else: # middle lowerCamelCase : str = self.mid_block(lowercase_ , lowercase_ ) lowerCamelCase : str = sample.to(lowercase_ ) # up for up_block in self.up_blocks: lowerCamelCase : Any = up_block(lowercase_ , lowercase_ ) # post-process if latent_embeds is None: lowerCamelCase : Optional[Any] = self.conv_norm_out(lowercase_ ) else: lowerCamelCase : Optional[Any] = self.conv_norm_out(lowercase_ , lowercase_ ) lowerCamelCase : str = self.conv_act(lowercase_ ) lowerCamelCase : List[Any] = self.conv_out(lowercase_ ) return sample class UpperCAmelCase_ ( nn.Module ): '''simple docstring''' def __init__( self , __A , __A , __A , __A=None , __A="random" , __A=False , __A=True ): """simple docstring""" super().__init__() lowerCamelCase : Any = n_e lowerCamelCase : int = vq_embed_dim lowerCamelCase : Tuple = beta lowerCamelCase : List[str] = legacy lowerCamelCase : List[Any] = nn.Embedding(self.n_e , self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e ) lowerCamelCase : Optional[Any] = remap if self.remap is not None: self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) ) lowerCamelCase : Dict = self.used.shape[0] lowerCamelCase : List[str] = unknown_index # "random" or 
"extra" or integer if self.unknown_index == "extra": lowerCamelCase : List[str] = self.re_embed lowerCamelCase : Tuple = self.re_embed + 1 print( F"""Remapping {self.n_e} indices to {self.re_embed} indices. """ F"""Using {self.unknown_index} for unknown indices.""" ) else: lowerCamelCase : Any = n_e lowerCamelCase : Optional[int] = sane_index_shape def _snake_case ( self , __A ): """simple docstring""" lowerCamelCase : Union[str, Any] = inds.shape assert len(lowercase_ ) > 1 lowerCamelCase : Dict = inds.reshape(ishape[0] , -1 ) lowerCamelCase : Tuple = self.used.to(lowercase_ ) lowerCamelCase : Union[str, Any] = (inds[:, :, None] == used[None, None, ...]).long() lowerCamelCase : Tuple = match.argmax(-1 ) lowerCamelCase : Any = match.sum(2 ) < 1 if self.unknown_index == "random": lowerCamelCase : Dict = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device ) else: lowerCamelCase : Tuple = self.unknown_index return new.reshape(lowercase_ ) def _snake_case ( self , __A ): """simple docstring""" lowerCamelCase : int = inds.shape assert len(lowercase_ ) > 1 lowerCamelCase : int = inds.reshape(ishape[0] , -1 ) lowerCamelCase : int = self.used.to(lowercase_ ) if self.re_embed > self.used.shape[0]: # extra token lowerCamelCase : Any = 0 # simply set to zero lowerCamelCase : Dict = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowercase_ ) return back.reshape(lowercase_ ) def _snake_case ( self , __A ): """simple docstring""" lowerCamelCase : List[str] = z.permute(0 , 2 , 3 , 1 ).contiguous() lowerCamelCase : Tuple = z.view(-1 , self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z lowerCamelCase : int = torch.argmin(torch.cdist(lowercase_ , self.embedding.weight ) , dim=1 ) lowerCamelCase : Any = self.embedding(lowercase_ ).view(z.shape ) lowerCamelCase : List[Any] = None lowerCamelCase : Any = None # compute loss for embedding if not self.legacy: lowerCamelCase : Tuple = self.beta * 
torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: lowerCamelCase : Tuple = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients lowerCamelCase : List[str] = z + (z_q - z).detach() # reshape back to match original input shape lowerCamelCase : Optional[int] = z_q.permute(0 , 3 , 1 , 2 ).contiguous() if self.remap is not None: lowerCamelCase : Any = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis lowerCamelCase : Union[str, Any] = self.remap_to_used(lowercase_ ) lowerCamelCase : Any = min_encoding_indices.reshape(-1 , 1 ) # flatten if self.sane_index_shape: lowerCamelCase : List[Any] = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def _snake_case ( self , __A , __A ): """simple docstring""" if self.remap is not None: lowerCamelCase : List[Any] = indices.reshape(shape[0] , -1 ) # add batch axis lowerCamelCase : Any = self.unmap_to_all(lowercase_ ) lowerCamelCase : Union[str, Any] = indices.reshape(-1 ) # flatten again # get quantized latent vectors lowerCamelCase : Optional[Any] = self.embedding(lowercase_ ) if shape is not None: lowerCamelCase : Optional[int] = z_q.view(lowercase_ ) # reshape back to match original input shape lowerCamelCase : str = z_q.permute(0 , 3 , 1 , 2 ).contiguous() return z_q class UpperCAmelCase_ ( _lowerCamelCase ): '''simple docstring''' def __init__( self , __A , __A=False ): """simple docstring""" lowerCamelCase : str = parameters lowerCamelCase , lowerCamelCase : int = torch.chunk(lowercase_ , 2 , dim=1 ) lowerCamelCase : str = torch.clamp(self.logvar , -30.0 , 20.0 ) lowerCamelCase : Optional[int] = deterministic lowerCamelCase : Optional[Any] = torch.exp(0.5 * self.logvar ) lowerCamelCase : Optional[Any] = torch.exp(self.logvar ) if self.deterministic: lowerCamelCase : int = torch.zeros_like( self.mean , device=self.parameters.device , 
dtype=self.parameters.dtype ) def _snake_case ( self , __A = None ): """simple docstring""" lowerCamelCase : Optional[int] = randn_tensor( self.mean.shape , generator=lowercase_ , device=self.parameters.device , dtype=self.parameters.dtype ) lowerCamelCase : Optional[Any] = self.mean + self.std * sample return x def _snake_case ( self , __A=None ): """simple docstring""" if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean , 2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar , dim=[1, 2, 3] , ) def _snake_case ( self , __A , __A=[1, 2, 3] ): """simple docstring""" if self.deterministic: return torch.Tensor([0.0] ) lowerCamelCase : List[Any] = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowercase_ ) def _snake_case ( self ): """simple docstring""" return self.mean
283
"""Mosaic data augmentation: stitch four random images (and their YOLO-format
annotations) into one output image per iteration.

Algorithm source: https://github.com/jason9075/opencv-mosaic-data-aug

Fixes vs. the previous revision: duplicate parameter names in every
signature (a SyntaxError), all functions sharing the single name
``__magic_name__`` while call sites referenced the real names, all module
constants bound to one name ``a``, ``import cva`` -> ``import cv2``, and the
quadrant slice-assignment targets restored per the upstream algorithm.
"""
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2
import numpy as np

# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100  # drop boxes smaller than 1% of the mosaic size
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250


def main() -> None:
    """Generate NUMBER_IMAGES mosaics and write each image + label file."""
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
        # Convert corner-format boxes back to YOLO (class cx cy w h) lines.
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Read YOLO label files and pair them with their images.

    Returns (img_paths, labels) where each labels entry is a list of
    [class_id, xmin, ymin, xmax, ymax] boxes in relative coordinates.
    Label files with no boxes are skipped.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            # YOLO (cx, cy, w, h) -> corner (xmin, ymin, xmax, ymax)
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
) -> tuple[list, list, str]:
    """Compose four images into one mosaic and remap their annotations.

    The split point is drawn uniformly from ``scale_range``; boxes smaller
    than ``filter_scale`` in either dimension are dropped. Returns the mosaic
    image, the remapped boxes, and the path of the first source image.
    """
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        # NOTE(review): the quadrant placements below were restored from the
        # upstream algorithm; the previous revision had lost the slice targets.
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]


def random_chars(number_char: int) -> str:
    """Return a random lowercase-alphanumeric string of length number_char."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
56
0
from __future__ import annotations import inspect import unittest from math import floor import numpy as np from transformers import CvtConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFCvtForImageClassification, TFCvtModel from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class snake_case_ ( _lowerCamelCase ): '''simple docstring''' def snake_case__( self : str ) ->Optional[int]: snake_case_ = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(lowercase_ , '''embed_dim''' ) ) self.parent.assertTrue(hasattr(lowercase_ , '''num_heads''' ) ) class snake_case_ : '''simple docstring''' def __init__( self : int , _UpperCamelCase : Any , _UpperCamelCase : Dict=1_3 , _UpperCamelCase : Optional[int]=6_4 , _UpperCamelCase : Optional[int]=3 , _UpperCamelCase : Union[str, Any]=[1_6, 4_8, 9_6] , _UpperCamelCase : List[str]=[1, 3, 6] , _UpperCamelCase : Optional[Any]=[1, 2, 1_0] , _UpperCamelCase : List[Any]=[7, 3, 3] , _UpperCamelCase : List[Any]=[4, 2, 2] , _UpperCamelCase : Union[str, Any]=[2, 1, 1] , _UpperCamelCase : Tuple=[2, 2, 2] , _UpperCamelCase : Union[str, Any]=[False, False, True] , _UpperCamelCase : str=[0.0, 0.0, 0.0] , _UpperCamelCase : Optional[Any]=0.02 , _UpperCamelCase : Optional[Any]=1e-12 , _UpperCamelCase : int=True , _UpperCamelCase : Union[str, Any]=True , _UpperCamelCase : Any=2 , ) ->List[str]: snake_case_ = parent snake_case_ = batch_size snake_case_ = image_size snake_case_ = patch_sizes snake_case_ = patch_stride snake_case_ = 
patch_padding snake_case_ = is_training snake_case_ = use_labels snake_case_ = num_labels snake_case_ = num_channels snake_case_ = embed_dim snake_case_ = num_heads snake_case_ = stride_kv snake_case_ = depth snake_case_ = cls_token snake_case_ = attention_drop_rate snake_case_ = initializer_range snake_case_ = layer_norm_eps def snake_case__( self : List[Any] ) ->int: snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ = None if self.use_labels: # create a random int32 tensor of given shape snake_case_ = ids_tensor([self.batch_size] , self.num_labels ) snake_case_ = self.get_config() return config, pixel_values, labels def snake_case__( self : Tuple ) ->Optional[Any]: return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def snake_case__( self : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any , _UpperCamelCase : Any ) ->List[str]: snake_case_ = TFCvtModel(config=lowercase_ ) snake_case_ = model(lowercase_ , training=lowercase_ ) snake_case_ = (self.image_size, self.image_size) snake_case_, snake_case_ = image_size[0], image_size[1] for i in range(len(self.depth ) ): snake_case_ = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) snake_case_ = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def snake_case__( self : Any , _UpperCamelCase : Dict , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[Any] ) 
->Optional[Any]: snake_case_ = self.num_labels snake_case_ = TFCvtForImageClassification(lowercase_ ) snake_case_ = model(lowercase_ , labels=lowercase_ , training=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case__( self : str ) ->List[str]: snake_case_ = self.prepare_config_and_inputs() snake_case_, snake_case_, snake_case_ = config_and_inputs snake_case_ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class snake_case_ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else () SCREAMING_SNAKE_CASE : List[Any] = ( {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification} if is_tf_available() else {} ) SCREAMING_SNAKE_CASE : Tuple = False SCREAMING_SNAKE_CASE : Dict = False SCREAMING_SNAKE_CASE : Optional[Any] = False SCREAMING_SNAKE_CASE : Dict = False SCREAMING_SNAKE_CASE : int = False def snake_case__( self : Dict ) ->Optional[Any]: snake_case_ = TFCvtModelTester(self ) snake_case_ = TFCvtConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=3_7 ) def snake_case__( self : List[Any] ) ->Tuple: self.config_tester.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() @unittest.skip(reason='''Cvt does not output attentions''' ) def snake_case__( self : Any ) ->Optional[int]: pass @unittest.skip(reason='''Cvt does not use inputs_embeds''' ) def snake_case__( self : Any ) ->List[Any]: pass @unittest.skip(reason='''Cvt does not support input and output 
embeddings''' ) def snake_case__( self : int ) ->List[str]: pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , ) def snake_case__( self : List[Any] ) ->List[Any]: super().test_dataset_conversion() @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , ) @slow def snake_case__( self : Union[str, Any] ) ->str: super().test_keras_fit() @unittest.skip(reason='''Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8''' ) def snake_case__( self : Any ) ->Optional[Any]: snake_case_ = tf.keras.mixed_precision.Policy('''mixed_float16''' ) tf.keras.mixed_precision.set_global_policy(lowercase_ ) super().test_keras_fit() tf.keras.mixed_precision.set_global_policy('''float32''' ) def snake_case__( self : Tuple ) ->Tuple: snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ = model_class(lowercase_ ) snake_case_ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ = [*signature.parameters.keys()] snake_case_ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowercase_ ) def snake_case__( self : List[str] ) ->Dict: def check_hidden_states_output(_UpperCamelCase : Any , _UpperCamelCase : Tuple , _UpperCamelCase : int ): snake_case_ = model_class(lowercase_ ) snake_case_ = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) snake_case_ = outputs.hidden_states snake_case_ = len(self.model_tester.depth ) self.assertEqual(len(lowercase_ ) , lowercase_ ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, 
self.model_tester.image_size // 4, ] , ) snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) def snake_case__( self : str ) ->int: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def snake_case__( self : List[str] ) ->List[str]: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase_ ) @slow def snake_case__( self : Optional[int] ) ->Tuple: for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ = TFCvtModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) def __SCREAMING_SNAKE_CASE (): snake_case_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class snake_case_ ( unittest.TestCase ): '''simple docstring''' @cached_property def snake_case__( self : Any ) ->str: return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def snake_case__( self : Optional[Any] ) ->List[str]: snake_case_ = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) snake_case_ = self.default_image_processor snake_case_ = prepare_img() snake_case_ = image_processor(images=lowercase_ , return_tensors='''tf''' ) # forward pass snake_case_ = model(**lowercase_ ) # verify the logits snake_case_ = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , lowercase_ ) snake_case_ = tf.constant([0.9285, 0.9015, -0.3150] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , lowercase_ , atol=1e-4 ) )
8
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class a : @staticmethod def A_ ( *lowercase_ : int , **lowercase_ : str ): pass @is_pipeline_test @require_vision @require_timm @require_torch class a ( unittest.TestCase ): snake_case_ = MODEL_FOR_OBJECT_DETECTION_MAPPING def A_ ( self : Any , lowercase_ : List[Any] , lowercase_ : Optional[int] , lowercase_ : List[str] ): snake_case_ = ObjectDetectionPipeline(model=lowercase_ , image_processor=lowercase_ ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def A_ ( self : Dict , lowercase_ : List[Any] , lowercase_ : int ): snake_case_ = object_detector('''./tests/fixtures/tests_samples/COCO/000000039769.png''' , threshold=0.0 ) self.assertGreater(len(lowercase_ ) , 0 ) for detected_object in outputs: self.assertEqual( lowercase_ , { '''score''': ANY(lowercase_ ), '''label''': ANY(lowercase_ ), '''box''': {'''xmin''': ANY(lowercase_ ), '''ymin''': ANY(lowercase_ ), '''xmax''': ANY(lowercase_ ), '''ymax''': ANY(lowercase_ )}, } , ) import datasets snake_case_ = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' ) snake_case_ = [ Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ), '''http://images.cocodataset.org/val2017/000000039769.jpg''', # RGBA dataset[0]['''file'''], # LA dataset[1]['''file'''], # L dataset[2]['''file'''], ] snake_case_ = object_detector(lowercase_ , threshold=0.0 ) self.assertEqual(len(lowercase_ ) , len(lowercase_ ) ) for outputs in batch_outputs: self.assertGreater(len(lowercase_ ) , 0 
) for detected_object in outputs: self.assertEqual( lowercase_ , { '''score''': ANY(lowercase_ ), '''label''': ANY(lowercase_ ), '''box''': {'''xmin''': ANY(lowercase_ ), '''ymin''': ANY(lowercase_ ), '''xmax''': ANY(lowercase_ ), '''ymax''': ANY(lowercase_ )}, } , ) @require_tf @unittest.skip('''Object detection not implemented in TF''' ) def A_ ( self : int ): pass @require_torch def A_ ( self : Tuple ): snake_case_ = '''hf-internal-testing/tiny-detr-mobilenetsv3''' snake_case_ = AutoModelForObjectDetection.from_pretrained(lowercase_ ) snake_case_ = AutoFeatureExtractor.from_pretrained(lowercase_ ) snake_case_ = ObjectDetectionPipeline(model=lowercase_ , feature_extractor=lowercase_ ) snake_case_ = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=0.0 ) self.assertEqual( nested_simplify(lowercase_ , decimals=4 ) , [ {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}}, {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}}, ] , ) snake_case_ = object_detector( [ '''http://images.cocodataset.org/val2017/000000039769.jpg''', '''http://images.cocodataset.org/val2017/000000039769.jpg''', ] , threshold=0.0 , ) self.assertEqual( nested_simplify(lowercase_ , decimals=4 ) , [ [ {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}}, {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}}, ], [ {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}}, {'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}}, ], ] , ) @require_torch @slow def A_ ( self : Optional[int] ): snake_case_ = 
'''facebook/detr-resnet-50''' snake_case_ = AutoModelForObjectDetection.from_pretrained(lowercase_ ) snake_case_ = AutoFeatureExtractor.from_pretrained(lowercase_ ) snake_case_ = ObjectDetectionPipeline(model=lowercase_ , feature_extractor=lowercase_ ) snake_case_ = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' ) self.assertEqual( nested_simplify(lowercase_ , decimals=4 ) , [ {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}}, {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}}, {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}}, {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}}, ] , ) snake_case_ = object_detector( [ '''http://images.cocodataset.org/val2017/000000039769.jpg''', '''http://images.cocodataset.org/val2017/000000039769.jpg''', ] ) self.assertEqual( nested_simplify(lowercase_ , decimals=4 ) , [ [ {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}}, {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}}, {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}}, {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}}, ], [ {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 
40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}}, {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}}, {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}}, {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}}, ], ] , ) @require_torch @slow def A_ ( self : Tuple ): snake_case_ = '''facebook/detr-resnet-50''' snake_case_ = pipeline('''object-detection''' , model=lowercase_ ) snake_case_ = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' ) self.assertEqual( nested_simplify(lowercase_ , decimals=4 ) , [ {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}}, {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}}, {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}}, {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}}, ] , ) snake_case_ = object_detector( [ '''http://images.cocodataset.org/val2017/000000039769.jpg''', '''http://images.cocodataset.org/val2017/000000039769.jpg''', ] ) self.assertEqual( nested_simplify(lowercase_ , decimals=4 ) , [ [ {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}}, {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, 
'''ymax''': 187}}, {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}}, {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}}, ], [ {'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}}, {'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}}, {'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}}, {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}}, ], ] , ) @require_torch @slow def A_ ( self : str ): snake_case_ = 0.9985 snake_case_ = '''facebook/detr-resnet-50''' snake_case_ = pipeline('''object-detection''' , model=lowercase_ ) snake_case_ = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=lowercase_ ) self.assertEqual( nested_simplify(lowercase_ , decimals=4 ) , [ {'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}}, {'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}}, ] , ) @require_torch @require_pytesseract @slow def A_ ( self : Dict ): snake_case_ = '''Narsil/layoutlmv3-finetuned-funsd''' snake_case_ = 0.9993 snake_case_ = pipeline('''object-detection''' , model=lowercase_ , threshold=lowercase_ ) snake_case_ = object_detector( 
'''https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png''' ) self.assertEqual( nested_simplify(lowercase_ , decimals=4 ) , [ {'''score''': 0.9993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}}, {'''score''': 0.9993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}}, ] , )
56
0
import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class lowercase : def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=False , _a=True , _a=99 , _a=64 , _a=5 , _a=4 , _a=64 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ) -> Optional[Any]: _A : Optional[int] = parent _A : Any = batch_size _A : str = seq_length _A : Optional[int] = is_training _A : Tuple = use_input_mask _A : List[str] = use_token_type_ids _A : Optional[int] = use_labels _A : int = vocab_size _A : Dict = hidden_size _A : Tuple = num_hidden_layers _A : Union[str, Any] = num_attention_heads _A : Union[str, Any] = intermediate_size _A : str = hidden_act _A : Optional[Any] = hidden_dropout_prob _A : Optional[Any] = attention_probs_dropout_prob _A : Optional[Any] = max_position_embeddings _A : Dict = type_vocab_size _A : Tuple = type_sequence_label_size _A : int = initializer_range _A : Union[str, Any] = num_labels _A : Optional[Any] = num_choices _A : int = scope def a__ ( self ) -> Optional[int]: return MPNetConfig.from_pretrained("""microsoft/mpnet-base""" ) def a__ ( self ) -> Any: _A : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A : List[Any] = None if self.use_input_mask: _A : Any = random_attention_mask([self.batch_size, self.seq_length] ) _A : Dict = None _A : Union[str, Any] = None _A : int = None if self.use_labels: _A : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A : Tuple = 
ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) _A : Dict = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def a__ ( self ) -> str: return MPNetConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def a__ ( self , _a , _a , _a , _a , _a , _a ) -> Union[str, Any]: _A : List[Any] = MPNetModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() _A : Dict = model(lowercase_ , lowercase_ ) _A : Optional[Any] = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def a__ ( self , _a , _a , _a , _a , _a , _a ) -> Optional[int]: _A : int = MPNetForQuestionAnswering(config=lowercase_ ) model.to(lowercase_ ) model.eval() _A : Optional[Any] = model( lowercase_ , attention_mask=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def a__ ( self , _a , _a , _a , _a , _a , _a ) -> int: _A : str = self.num_labels _A : Optional[Any] = MPNetForSequenceClassification(lowercase_ ) model.to(lowercase_ ) model.eval() _A : Tuple = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a__ ( self , _a , _a , _a , 
_a , _a , _a ) -> Optional[Any]: _A : Any = self.num_choices _A : str = MPNetForMultipleChoice(config=lowercase_ ) model.to(lowercase_ ) model.eval() _A : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A : Optional[int] = model( lowercase_ , attention_mask=lowercase_ , labels=lowercase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def a__ ( self , _a , _a , _a , _a , _a , _a ) -> Any: _A : int = self.num_labels _A : str = MPNetForTokenClassification(config=lowercase_ ) model.to(lowercase_ ) model.eval() _A : List[Any] = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def a__ ( self ) -> Optional[int]: _A : Dict = self.prepare_config_and_inputs() ((_A) , (_A) , (_A) , (_A) , (_A) , (_A)) : Optional[Any] = config_and_inputs _A : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowercase ( _lowerCamelCase,_lowerCamelCase,unittest.TestCase ): _a = ( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) if is_torch_available() else () ) _a = ( { "feature-extraction": MPNetModel, "fill-mask": MPNetForMaskedLM, "question-answering": MPNetForQuestionAnswering, "text-classification": MPNetForSequenceClassification, "token-classification": MPNetForTokenClassification, "zero-shot": MPNetForSequenceClassification, } if is_torch_available() else {} ) _a = False _a = True def a__ ( self ) -> List[str]: _A : str = MPNetModelTester(self ) _A : Any = ConfigTester(self , config_class=lowercase_ , hidden_size=37 ) def a__ ( self ) -> Optional[int]: self.config_tester.run_common_tests() def a__ ( self ) -> Dict: _A : Tuple = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*lowercase_ ) def a__ ( self ) -> Optional[int]: _A : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*lowercase_ ) def a__ ( self ) -> Union[str, Any]: _A : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*lowercase_ ) def a__ ( self ) -> Dict: _A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*lowercase_ ) def a__ ( self ) -> int: _A : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*lowercase_ ) @require_torch class lowercase ( unittest.TestCase ): @slow def a__ ( self ) -> Union[str, Any]: _A : Optional[int] = MPNetModel.from_pretrained("""microsoft/mpnet-base""" ) _A : Optional[Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) _A : Optional[int] = model(lowercase_ )[0] _A : Optional[int] = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , lowercase_ ) _A : str = torch.tensor( [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] ) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase_ , atol=1e-4 ) )
26
'''simple docstring''' import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class a : def __init__( self : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Any=13 , lowercase_ : Optional[Any]=7 , lowercase_ : Optional[Any]=True , lowercase_ : Dict=True , lowercase_ : Tuple=False , lowercase_ : Optional[Any]=True , lowercase_ : Any=99 , lowercase_ : Union[str, Any]=64 , lowercase_ : str=5 , lowercase_ : int=4 , lowercase_ : List[Any]=64 , lowercase_ : Dict="gelu" , lowercase_ : Optional[int]=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : Tuple=512 , lowercase_ : List[Any]=16 , lowercase_ : str=2 , lowercase_ : List[str]=0.02 , lowercase_ : Optional[Any]=3 , lowercase_ : Optional[Any]=4 , lowercase_ : List[Any]=None , ): snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_input_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = type_sequence_label_size snake_case_ = initializer_range snake_case_ = num_labels snake_case_ = num_choices snake_case_ = scope def A_ ( self : List[str] ): return MPNetConfig.from_pretrained('''microsoft/mpnet-base''' ) def A_ ( self : str ): 
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_input_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = None snake_case_ = None snake_case_ = None if self.use_labels: snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case_ = ids_tensor([self.batch_size] , self.num_choices ) snake_case_ = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def A_ ( self : Tuple ): return MPNetConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def A_ ( self : Any , lowercase_ : Dict , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Optional[int] ): snake_case_ = MPNetModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() snake_case_ = model(lowercase_ , lowercase_ ) snake_case_ = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def A_ ( self : str , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : str , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : Optional[int] ): snake_case_ = MPNetForQuestionAnswering(config=lowercase_ ) model.to(lowercase_ ) model.eval() snake_case_ = model( lowercase_ , attention_mask=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , ) 
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A_ ( self : Tuple , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Any ): snake_case_ = self.num_labels snake_case_ = MPNetForSequenceClassification(lowercase_ ) model.to(lowercase_ ) model.eval() snake_case_ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A_ ( self : Any , lowercase_ : Any , lowercase_ : str , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : Dict ): snake_case_ = self.num_choices snake_case_ = MPNetForMultipleChoice(config=lowercase_ ) model.to(lowercase_ ) model.eval() snake_case_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case_ = model( lowercase_ , attention_mask=lowercase_ , labels=lowercase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A_ ( self : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : int , lowercase_ : List[str] ): snake_case_ = self.num_labels snake_case_ = MPNetForTokenClassification(config=lowercase_ ) model.to(lowercase_ ) model.eval() snake_case_ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A_ ( self : Union[str, Any] ): snake_case_ = self.prepare_config_and_inputs() ((snake_case_) ,(snake_case_) ,(snake_case_) ,(snake_case_) ,(snake_case_) ,(snake_case_)) = config_and_inputs snake_case_ = {'''input_ids''': input_ids, 
'''attention_mask''': input_mask} return config, inputs_dict @require_torch class a ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): snake_case_ = ( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) if is_torch_available() else () ) snake_case_ = ( { "feature-extraction": MPNetModel, "fill-mask": MPNetForMaskedLM, "question-answering": MPNetForQuestionAnswering, "text-classification": MPNetForSequenceClassification, "token-classification": MPNetForTokenClassification, "zero-shot": MPNetForSequenceClassification, } if is_torch_available() else {} ) snake_case_ = False snake_case_ = True def A_ ( self : Tuple ): snake_case_ = MPNetModelTester(self ) snake_case_ = ConfigTester(self , config_class=lowercase_ , hidden_size=37 ) def A_ ( self : Union[str, Any] ): self.config_tester.run_common_tests() def A_ ( self : Tuple ): snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*lowercase_ ) def A_ ( self : List[Any] ): snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*lowercase_ ) def A_ ( self : List[Any] ): snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*lowercase_ ) def A_ ( self : Union[str, Any] ): snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*lowercase_ ) def A_ ( self : Tuple ): snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*lowercase_ ) @require_torch class a ( unittest.TestCase ): @slow def A_ ( self : List[Any] ): snake_case_ = MPNetModel.from_pretrained('''microsoft/mpnet-base''' ) snake_case_ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) snake_case_ = model(lowercase_ 
)[0] snake_case_ = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , lowercase_ ) snake_case_ = torch.tensor( [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] ) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase_ , atol=1e-4 ) )
56
0
"""simple docstring""" import enum import os from hashlib import shaaaa from typing import Optional from .. import config from .logging import get_logger lowercase_ = get_logger(__name__) class __lowerCAmelCase ( enum.Enum ): '''simple docstring''' __UpperCAmelCase : str = 'all_checks' __UpperCAmelCase : Optional[Any] = 'basic_checks' __UpperCAmelCase : Optional[int] = 'no_checks' class __lowerCAmelCase ( _lowerCamelCase ): '''simple docstring''' pass class __lowerCAmelCase ( _lowerCamelCase ): '''simple docstring''' pass class __lowerCAmelCase ( _lowerCamelCase ): '''simple docstring''' pass class __lowerCAmelCase ( _lowerCamelCase ): '''simple docstring''' pass def lowercase ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Union[str, Any]=None ) -> Dict: if expected_checksums is None: logger.info('''Unable to verify checksums.''' ) return if len(set(__UpperCAmelCase ) - set(__UpperCAmelCase ) ) > 0: raise ExpectedMoreDownloadedFiles(str(set(__UpperCAmelCase ) - set(__UpperCAmelCase ) ) ) if len(set(__UpperCAmelCase ) - set(__UpperCAmelCase ) ) > 0: raise UnexpectedDownloadedFile(str(set(__UpperCAmelCase ) - set(__UpperCAmelCase ) ) ) __a = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]] __a = ''' for ''' + verification_name if verification_name is not None else '''''' if len(__UpperCAmelCase ) > 0: raise NonMatchingChecksumError( f'''Checksums didn\'t match{for_verification_name}:\n''' f'''{bad_urls}\n''' '''Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error''' ) logger.info('''All the checksums matched successfully''' + for_verification_name ) class __lowerCAmelCase ( _lowerCamelCase ): '''simple docstring''' pass class __lowerCAmelCase ( _lowerCamelCase ): '''simple docstring''' pass class __lowerCAmelCase ( _lowerCamelCase ): '''simple docstring''' pass class __lowerCAmelCase ( _lowerCamelCase ): '''simple docstring''' pass def lowercase ( 
lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : str ) -> Tuple: if expected_splits is None: logger.info('''Unable to verify splits sizes.''' ) return if len(set(__UpperCAmelCase ) - set(__UpperCAmelCase ) ) > 0: raise ExpectedMoreSplits(str(set(__UpperCAmelCase ) - set(__UpperCAmelCase ) ) ) if len(set(__UpperCAmelCase ) - set(__UpperCAmelCase ) ) > 0: raise UnexpectedSplits(str(set(__UpperCAmelCase ) - set(__UpperCAmelCase ) ) ) __a = [ {'''expected''': expected_splits[name], '''recorded''': recorded_splits[name]} for name in expected_splits if expected_splits[name].num_examples != recorded_splits[name].num_examples ] if len(__UpperCAmelCase ) > 0: raise NonMatchingSplitsSizesError(str(__UpperCAmelCase ) ) logger.info('''All the splits matched successfully.''' ) def lowercase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[Any] = True ) -> dict: if record_checksum: __a = shaaaa() with open(__UpperCAmelCase , '''rb''' ) as f: for chunk in iter(lambda: f.read(1 << 20 ) , b'''''' ): m.update(__UpperCAmelCase ) __a = m.hexdigest() else: __a = None return {"num_bytes": os.path.getsize(__UpperCAmelCase ), "checksum": checksum} def lowercase ( lowerCAmelCase__ : List[str] ) -> Tuple: if dataset_size and config.IN_MEMORY_MAX_SIZE: return dataset_size < config.IN_MEMORY_MAX_SIZE else: return False
45
'''simple docstring''' import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class a ( _lowerCamelCase ): def A_ ( self : str ): snake_case_ = tempfile.mkdtemp() snake_case_ = 8 # DPR tok snake_case_ = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] snake_case_ = os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) os.makedirs(lowercase_ , exist_ok=lowercase_ ) snake_case_ = os.path.join(lowercase_ , DPR_VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) # BART tok snake_case_ = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] 
snake_case_ = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) ) snake_case_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] snake_case_ = {'''unk_token''': '''<unk>'''} snake_case_ = os.path.join(self.tmpdirname , '''bart_tokenizer''' ) os.makedirs(lowercase_ , exist_ok=lowercase_ ) snake_case_ = os.path.join(lowercase_ , BART_VOCAB_FILES_NAMES['''vocab_file'''] ) snake_case_ = os.path.join(lowercase_ , BART_VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(lowercase_ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(lowercase_ ) ) def A_ ( self : Union[str, Any] ): return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def A_ ( self : Union[str, Any] ): return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def A_ ( self : int ): return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) ) def A_ ( self : str ): shutil.rmtree(self.tmpdirname ) def A_ ( self : str ): snake_case_ = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def A_ ( self : str ): snake_case_ = self.get_dummy_dataset() snake_case_ = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset: snake_case_ = dataset snake_case_ = RagRetriever( lowercase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , 
generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def A_ ( self : str , lowercase_ : bool ): snake_case_ = self.get_dummy_dataset() snake_case_ = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , ) if from_disk: snake_case_ = os.path.join(self.tmpdirname , '''dataset''' ) snake_case_ = os.path.join(self.tmpdirname , '''index.faiss''' ) dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) ) dataset.drop_index('''embeddings''' ) dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) ) del dataset snake_case_ = RagRetriever( lowercase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: snake_case_ = RagRetriever( lowercase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , lowercase_ ) , ) return retriever def A_ ( self : Tuple ): snake_case_ = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT ) snake_case_ = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' ) dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' ) pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) ) snake_case_ = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' ) snake_case_ = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset} pickle.dump(lowercase_ , open(lowercase_ , '''wb''' ) ) snake_case_ = RagConfig( 
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , ) snake_case_ = RagRetriever( lowercase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def A_ ( self : Optional[Any] ): snake_case_ = 1 snake_case_ = self.get_dummy_canonical_hf_index_retriever() snake_case_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case_ ,snake_case_ ,snake_case_ = retriever.retrieve(lowercase_ , n_docs=lowercase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(lowercase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , lowercase_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def A_ ( self : str ): snake_case_ = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset: snake_case_ = self.get_dummy_dataset() retriever.save_pretrained(lowercase_ ) snake_case_ = RagRetriever.from_pretrained(lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) snake_case_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case_ = retriever.retrieve(lowercase_ , n_docs=1 ) self.assertTrue(out is not None ) def A_ ( self : int ): snake_case_ = 1 snake_case_ = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ ) snake_case_ = np.array( 
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case_ ,snake_case_ ,snake_case_ = retriever.retrieve(lowercase_ , n_docs=lowercase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(lowercase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , lowercase_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def A_ ( self : int ): snake_case_ = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(lowercase_ ) snake_case_ = RagRetriever.from_pretrained(lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) snake_case_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case_ = retriever.retrieve(lowercase_ , n_docs=1 ) self.assertTrue(out is not None ) def A_ ( self : str ): snake_case_ = 1 snake_case_ = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ ) snake_case_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case_ ,snake_case_ ,snake_case_ = retriever.retrieve(lowercase_ , n_docs=lowercase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(lowercase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , lowercase_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc 
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def A_ ( self : Any ): snake_case_ = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(lowercase_ ) snake_case_ = RagRetriever.from_pretrained(lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) snake_case_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case_ = retriever.retrieve(lowercase_ , n_docs=1 ) self.assertTrue(out is not None ) def A_ ( self : Any ): snake_case_ = 1 snake_case_ = self.get_dummy_legacy_index_retriever() snake_case_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case_ ,snake_case_ ,snake_case_ = retriever.retrieve(lowercase_ , n_docs=lowercase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(lowercase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''text'''] ) , lowercase_ ) self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def A_ ( self : int ): snake_case_ = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(lowercase_ ) snake_case_ = RagRetriever.from_pretrained(lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) snake_case_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case_ = retriever.retrieve(lowercase_ , n_docs=1 ) self.assertTrue(out is not None ) @require_torch 
@require_tokenizers @require_sentencepiece def A_ ( self : List[str] ): import torch snake_case_ = 1 snake_case_ = self.get_dummy_canonical_hf_index_retriever() snake_case_ = [[5, 7], [10, 11]] snake_case_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case_ = retriever(lowercase_ , lowercase_ , prefix=retriever.config.generator.prefix , n_docs=lowercase_ ) snake_case_ ,snake_case_ ,snake_case_ = ( out['''context_input_ids'''], out['''context_attention_mask'''], out['''retrieved_doc_embeds'''], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(lowercase_ , lowercase_ ) self.assertIsInstance(lowercase_ , lowercase_ ) self.assertIsInstance(lowercase_ , np.ndarray ) snake_case_ = retriever( lowercase_ , lowercase_ , prefix=retriever.config.generator.prefix , n_docs=lowercase_ , return_tensors='''pt''' , ) snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ = ( # noqa: F841 out['''context_input_ids'''], out['''context_attention_mask'''], out['''retrieved_doc_embeds'''], out['''doc_ids'''], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(lowercase_ , torch.Tensor ) self.assertIsInstance(lowercase_ , torch.Tensor ) self.assertIsInstance(lowercase_ , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def A_ ( self : Tuple ): snake_case_ = self.get_dpr_ctx_encoder_tokenizer() snake_case_ = 1 snake_case_ = self.get_dummy_custom_hf_index_retriever(from_disk=lowercase_ ) retriever.set_ctx_encoder_tokenizer(lowercase_ ) snake_case_ = [[5, 7], [10, 11]] snake_case_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) snake_case_ = retriever(lowercase_ , lowercase_ , prefix=retriever.config.generator.prefix , n_docs=lowercase_ ) self.assertEqual( len(lowercase_ ) , 6 ) # check whether the retriever output 
consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , lowercase_ ) # check for doc token related keys in dictionary.
56
0
import unittest from transformers import RoFormerTokenizer, RoFormerTokenizerFast from transformers.testing_utils import require_rjieba, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_rjieba @require_tokenizers class A_ ( _lowerCamelCase , unittest.TestCase ): _UpperCAmelCase : Dict = RoFormerTokenizer _UpperCAmelCase : Optional[int] = RoFormerTokenizerFast _UpperCAmelCase : Tuple = True _UpperCAmelCase : Union[str, Any] = True def lowerCAmelCase ( self : Tuple): super().setUp() def lowerCAmelCase ( self : List[str] ,**SCREAMING_SNAKE_CASE__ : str): return self.tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' ,**lowercase_) def lowerCAmelCase ( self : int ,**SCREAMING_SNAKE_CASE__ : int): return self.rust_tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' ,**lowercase_) def lowerCAmelCase ( self : Optional[int]): __lowerCamelCase : int = '永和服装饰品有限公司,今天天气非常好' __lowerCamelCase : List[str] = '永和 服装 饰品 有限公司 , 今 天 天 气 非常 好' return input_text, output_text def lowerCAmelCase ( self : List[Any]): __lowerCamelCase : Any = self.get_tokenizer() __lowerCamelCase , __lowerCamelCase : Tuple = self.get_chinese_input_output_texts() __lowerCamelCase : Optional[int] = tokenizer.tokenize(lowercase_) self.assertListEqual(lowercase_ ,output_text.split()) __lowerCamelCase : List[str] = tokens + [tokenizer.unk_token] __lowerCamelCase : List[Any] = [2_2_9_4_3, 2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_) ,lowercase_) def lowerCAmelCase ( self : int): __lowerCamelCase : int = self.get_rust_tokenizer() __lowerCamelCase , __lowerCamelCase : int = self.get_chinese_input_output_texts() __lowerCamelCase : List[str] = tokenizer.tokenize(lowercase_) self.assertListEqual(lowercase_ ,output_text.split()) __lowerCamelCase : Optional[Any] = tokens + [tokenizer.unk_token] __lowerCamelCase : List[str] = [2_2_9_4_3, 
2_1_3_3_2, 3_4_4_3_1, 4_5_9_0_4, 1_1_7, 3_0_6, 1_2_3_1, 1_2_3_1, 2_6_5_3, 3_3_9_9_4, 1_2_6_6, 1_0_0] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_) ,lowercase_) def lowerCAmelCase ( self : int): pass def lowerCAmelCase ( self : List[Any]): pass def lowerCAmelCase ( self : Optional[int]): pass
73
'''simple docstring''' import os import re import warnings from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_ta import TaTokenizer else: a : Dict = None a : List[Any] = logging.get_logger(__name__) a : List[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} a : str = { 'vocab_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model', 't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model', 't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model', }, 'tokenizer_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json', 't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json', 't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json', }, } # TODO(PVP) - this should be removed in Transformers v5 a : List[Any] = { 't5-small': 512, 't5-base': 512, 't5-large': 512, 't5-3b': 512, 't5-11b': 512, } class a ( _lowerCamelCase ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = ["input_ids", "attention_mask"] snake_case_ = TaTokenizer snake_case_ = [] def __init__( self : List[Any] , lowercase_ : int=None , lowercase_ : Dict=None , lowercase_ : Dict="</s>" , lowercase_ : List[Any]="<unk>" , lowercase_ : int="<pad>" , lowercase_ : int=100 , lowercase_ : List[Any]=None , **lowercase_ : List[str] , ): # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: 
snake_case_ = [F"<extra_id_{i}>" for i in range(lowercase_ )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra special tokens snake_case_ = len(set(filter(lambda lowercase_ : bool('''extra_id_''' in str(lowercase_ ) ) , lowercase_ ) ) ) if extra_tokens != extra_ids: raise ValueError( F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are" ''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids''' ''' tokens''' ) super().__init__( lowercase_ , tokenizer_file=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , extra_ids=lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ , ) snake_case_ = vocab_file snake_case_ = False if not self.vocab_file else True snake_case_ = extra_ids @staticmethod def A_ ( lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : int ): if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes: snake_case_ = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( '''This tokenizer was incorrectly instantiated with a model max length of''' F" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this" ''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with''' ''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on''' F" {pretrained_model_name_or_path} automatically truncating your input to" F" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences" F" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with" ''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please''' ''' 
instantiate this tokenizer with `model_max_length` set to your preferred value.''' , lowercase_ , ) return max_model_length def A_ ( self : Optional[Any] , lowercase_ : str , lowercase_ : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(lowercase_ ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return snake_case_ = os.path.join( lowercase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ): copyfile(self.vocab_file , lowercase_ ) logger.info(F"Copy vocab file to {out_vocab_file}" ) return (out_vocab_file,) def A_ ( self : Optional[int] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ): snake_case_ = token_ids_a + [self.eos_token_id] if token_ids_a is None: return self.prefix_tokens + token_ids_a else: snake_case_ = token_ids_a + [self.eos_token_id] return self.prefix_tokens + token_ids_a + token_ids_a def A_ ( self : int , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ): snake_case_ = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def A_ ( self : Dict ): return list( set(filter(lambda lowercase_ : bool(re.search(R'''<extra_id_\d+>''' , lowercase_ ) ) is not None , self.additional_special_tokens ) ) ) def A_ ( self : Any ): return [self.convert_tokens_to_ids(lowercase_ ) for token in self.get_sentinel_tokens()]
56
0
import re def A_ ( snake_case : Optional[int] ) -> bool: '''simple docstring''' __UpperCamelCase = re.compile( r'''^(?:0|94|\+94|0{2}94)''' r'''7(0|1|2|4|5|6|7|8)''' r'''(-| |)''' r'''\d{7}$''' ) return bool(re.search(__UpperCAmelCase , __UpperCAmelCase ) ) if __name__ == "__main__": lowercase__ : Any = '0094702343221' print(is_sri_lankan_phone_number(phone))
328
'''simple docstring''' from __future__ import annotations import math def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> int: '''simple docstring''' if depth < 0: raise ValueError('''Depth cannot be less than 0''' ) if len(__UpperCAmelCase ) == 0: raise ValueError('''Scores cannot be empty''' ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1, node_index * 2, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ), minimax(depth + 1, node_index * 2 + 1, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ), ) return min( minimax(depth + 1, node_index * 2, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ), minimax(depth + 1, node_index * 2 + 1, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ), ) def __magic_name__ ( ) -> None: '''simple docstring''' snake_case_ = [90, 23, 6, 33, 21, 65, 123, 3_4423] snake_case_ = math.log(len(__UpperCAmelCase ), 2 ) print('''Optimal value : ''', end='''''' ) print(minimax(0, 0, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
56
0
"""simple docstring""" from maths.prime_check import is_prime def UpperCAmelCase__ ( lowerCAmelCase__ :Any ) -> int: '''simple docstring''' if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): lowercase = f'Input value of [number={number}] must be an integer' raise TypeError(__UpperCAmelCase ) if is_prime(__UpperCAmelCase ) and is_prime(number + 2 ): return number + 2 else: return -1 if __name__ == "__main__": import doctest doctest.testmod()
197
'''simple docstring''' import argparse import torch from torch import nn from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration def __magic_name__ ( __UpperCAmelCase ) -> str: '''simple docstring''' snake_case_ = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''encoder.embed_positions._float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(__UpperCAmelCase, __UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase ) -> Optional[int]: '''simple docstring''' snake_case_ = list(s_dict.keys() ) for key in keys: if "transformer_layers" in key: snake_case_ = s_dict.pop(__UpperCAmelCase ) elif "subsample" in key: snake_case_ = s_dict.pop(__UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase ) -> int: '''simple docstring''' snake_case_ ,snake_case_ = emb.weight.shape snake_case_ = nn.Linear(__UpperCAmelCase, __UpperCAmelCase, bias=__UpperCAmelCase ) snake_case_ = emb.weight.data return lin_layer def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Dict: '''simple docstring''' snake_case_ = torch.load(__UpperCAmelCase, map_location='''cpu''' ) snake_case_ = mam_aaa['''args'''] snake_case_ = mam_aaa['''model'''] snake_case_ = state_dict['''decoder.output_projection.weight'''] remove_ignore_keys_(__UpperCAmelCase ) rename_keys(__UpperCAmelCase ) snake_case_ = state_dict['''decoder.embed_tokens.weight'''].shape[0] snake_case_ = args.share_decoder_input_output_embed snake_case_ = [int(__UpperCAmelCase ) for i in args.conv_kernel_sizes.split(''',''' )] snake_case_ = SpeechaTextConfig( vocab_size=__UpperCAmelCase, max_source_positions=args.max_source_positions, max_target_positions=args.max_target_positions, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, 
decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='''relu''', num_conv_layers=len(__UpperCAmelCase ), conv_channels=args.conv_channels, conv_kernel_sizes=__UpperCAmelCase, input_feat_per_channel=args.input_feat_per_channel, input_channels=args.input_channels, tie_word_embeddings=__UpperCAmelCase, num_beams=5, max_length=200, use_cache=__UpperCAmelCase, decoder_start_token_id=2, early_stopping=__UpperCAmelCase, ) snake_case_ = SpeechaTextForConditionalGeneration(__UpperCAmelCase ) snake_case_ ,snake_case_ = model.model.load_state_dict(__UpperCAmelCase, strict=__UpperCAmelCase ) if len(__UpperCAmelCase ) > 0 and not set(__UpperCAmelCase ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,''' F" but all the following weights are missing {missing}" ) if tie_embeds: snake_case_ = make_linear_from_emb(model.model.decoder.embed_tokens ) else: snake_case_ = lm_head_weights model.save_pretrained(__UpperCAmelCase ) if __name__ == "__main__": a : Any = argparse.ArgumentParser() # Required parameters parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.') parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') a : List[Any] = parser.parse_args() convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
56
0
"""Lazy-import scaffolding for the Pix2Struct model family.

Nothing heavy is imported at package-import time; ``_LazyModule`` resolves the
symbols below on first attribute access.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Maps submodule name -> public symbols, consumed by _LazyModule below.
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

# The image processor is only exposed when vision deps (PIL, ...) are installed.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

# The modeling code is only exposed when PyTorch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    # Mirror the structure above for static type checkers.
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    # Replace this module with the lazy proxy so attribute access triggers imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
78
"""Dummy placeholder object used when optional backends are not installed."""
from ..utils import DummyObject, requires_backends


class a(metaclass=_lowerCamelCase):
    # Stand-in emitted when the optional backends listed below are missing: the
    # metaclass (DummyObject elsewhere in the package — obfuscated here as
    # `_lowerCamelCase`) makes any attribute access / construction raise an
    # informative ImportError via `requires_backends` instead of a bare NameError.
    # NOTE(review): the backend list attribute is conventionally named `_backends`;
    # here it is obfuscated as `snake_case_` — confirm against the real DummyObject.
    snake_case_ = ["transformers", "torch", "note_seq"]  # backends the real class needs

    def __init__(self, *lowercase_, **lowercase_):
        # Constructing the placeholder fails unless all required backends import.
        requires_backends(self, ['''transformers''', '''torch''', '''note_seq'''])

    @classmethod
    def A_(cls, *lowercase_, **lowercase_):
        # Classmethod factory placeholder (e.g. from_config) — same backend check.
        requires_backends(cls, ['''transformers''', '''torch''', '''note_seq'''])

    @classmethod
    def A_(cls, *lowercase_, **lowercase_):
        # Classmethod factory placeholder (e.g. from_pretrained) — same backend check.
        requires_backends(cls, ['''transformers''', '''torch''', '''note_seq'''])
56
0
"""Boyer-Moore string search using only the bad-character heuristic."""
from __future__ import annotations


class BoyerMooreSearch:
    """Find every alignment of ``pattern`` in ``text``.

    Only the bad-character rule is implemented: on a mismatch the pattern could
    be shifted so the offending text character lines up with its right-most
    occurrence inside the pattern.
    """

    def __init__(self, text: str, pattern: str) -> None:
        # The original (obfuscated) version declared duplicate parameter names,
        # which is a SyntaxError; restored to distinct `text` / `pattern`.
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the right-most index of ``char`` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the right-most mismatch for the window starting
        at ``current_pos``, or -1 when the whole window matches the pattern."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        """Return all start positions in the text where the pattern matches."""
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                # shifting index lgtm [py/multiple-definition]
                i = mismatch_index - match_index
        return positions


text = "ABAABA"
pattern = "AB"

bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
126
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    """Register the custom markers used across the test suite.

    Must be named ``pytest_configure`` for pytest to discover it; the obfuscated
    ``__magic_name__`` definitions were never called by pytest.
    """
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    """Forward option registration to the shared helper (e.g. --make-reports)."""
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """Emit the extended report files when --make-reports was requested."""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    """Treat 'no tests collected' (exit code 5) as success so CI jobs don't fail."""
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    """OutputChecker that accepts any output when the IGNORE_RESULT flag is set."""

    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


# Install the custom doctest machinery globally.
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
56
0
"""DFS-based topological sort of a small hard-coded DAG.

The original (obfuscated) version declared duplicate parameter names (a
SyntaxError) and recursed/printed through undefined names; restored here.
"""

# Adjacency list of the DAG and the full vertex set it is checked against.
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """Visit ``start`` depth-first, appending each vertex to ``sort`` after its
    neighbors; restarts from any still-unvisited vertex so every node is covered.

    Args:
        start: vertex to begin the traversal at.
        visited: shared list of already-visited vertices (mutated in place).
        sort: shared output list in (this traversal's) topological order.

    Returns:
        The ``sort`` list, for convenience in the recursive calls.
    """
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
201
"""Marian model configuration and its ONNX export configuration."""
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging


logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}


class MarianConfig(PretrainedConfig):
    """Configuration for a Marian encoder-decoder translation model.

    All arguments are stored as attributes; generation defaults (beam size,
    max length, ...) are forwarded to ``PretrainedConfig`` via ``**kwargs``.
    """

    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        activation_dropout=0.0,
        attention_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # A separate decoder vocabulary is optional; default to the shared one.
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    """ONNX export configuration for Marian (mirrors the BART ONNX config)."""

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                # With cached keys/values only the newest decoder token is fed.
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build encoder + decoder dummy inputs (and past key/values if enabled)."""
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build decoder-only dummy inputs (and past key/values if enabled)."""
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Dispatch dummy-input generation according to the export task."""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        # Seq2seq tasks use the 4-tuple flattening; causal-lm falls back to the
        # grandparent's 2-tuple flattening.
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
56
0
# Configuration consumed by the doc/notebook build: the install cell prepended to
# generated notebooks, and placeholder names the code formatter must not touch.
# The original (obfuscated) version referenced INSTALL_CONTENT without defining
# it, which raises NameError at import time.

INSTALL_CONTENT = (
    "\n# Transformers installation\n! pip install transformers datasets\n"
    "# To install from source instead of the last release, comment the command above and uncomment the following one.\n"
    "# ! pip install git+https://github.com/huggingface/transformers.git\n"
)

# First cell(s) injected into every generated notebook.
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]

# Template placeholders mapped to fake identifiers so `black` can parse doc snippets.
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
59
"""Tests for the CycleDiffusion pipeline (fast dummy-model tests + slow GPU
integration tests). Restored from an obfuscated dump in which every method was
named ``A_`` — later defs clobbered earlier ones, so only one test per class
would actually have run.
"""
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build tiny, seeded model components so the pipeline runs on CPU in tests."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1000,
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline inputs; generator construction differs on MPS."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()


@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
56
0
import re from typing import Callable, List, Optional, Union import tensorflow as tf try: from tensorflow.keras.optimizers.legacy import Adam except ImportError: from tensorflow.keras.optimizers import Adam class UpperCAmelCase_ ( tf.keras.optimizers.schedules.LearningRateSchedule ): '''simple docstring''' def __init__( self , __A , __A , __A , __A = 1.0 , __A = None , ): """simple docstring""" super().__init__() lowerCamelCase : Optional[Any] = initial_learning_rate lowerCamelCase : List[Any] = warmup_steps lowerCamelCase : Union[str, Any] = power lowerCamelCase : Optional[int] = decay_schedule_fn lowerCamelCase : Optional[Any] = name def __call__( self , __A ): """simple docstring""" with tf.name_scope(self.name or "WarmUp" ) as name: # Implements polynomial warmup. i.e., if global_step < warmup_steps, the # learning rate will be `global_step/num_warmup_steps * init_lr`. lowerCamelCase : str = tf.cast(lowercase_ , tf.floataa ) lowerCamelCase : List[str] = tf.cast(self.warmup_steps , tf.floataa ) lowerCamelCase : Any = global_step_float / warmup_steps_float lowerCamelCase : Optional[int] = self.initial_learning_rate * tf.math.pow(lowercase_ , self.power ) return tf.cond( global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=lowercase_ , ) def _snake_case ( self ): """simple docstring""" return { "initial_learning_rate": self.initial_learning_rate, "decay_schedule_fn": self.decay_schedule_fn, "warmup_steps": self.warmup_steps, "power": self.power, "name": self.name, } def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = 0.9 , SCREAMING_SNAKE_CASE_ = 0.999 , SCREAMING_SNAKE_CASE_ = 1E-8 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = None , ): '''simple docstring''' lowerCamelCase : Union[str, Any] 
= tf.keras.optimizers.schedules.PolynomialDecay( initial_learning_rate=__UpperCAmelCase , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=__UpperCAmelCase , ) if num_warmup_steps: lowerCamelCase : Union[str, Any] = WarmUp( initial_learning_rate=__UpperCAmelCase , decay_schedule_fn=__UpperCAmelCase , warmup_steps=__UpperCAmelCase , ) if weight_decay_rate > 0.0: lowerCamelCase : Optional[int] = AdamWeightDecay( learning_rate=__UpperCAmelCase , weight_decay_rate=__UpperCAmelCase , beta_a=__UpperCAmelCase , beta_a=__UpperCAmelCase , epsilon=__UpperCAmelCase , clipnorm=__UpperCAmelCase , global_clipnorm=__UpperCAmelCase , exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"] , include_in_weight_decay=__UpperCAmelCase , ) else: lowerCamelCase : str = tf.keras.optimizers.Adam( learning_rate=__UpperCAmelCase , beta_a=__UpperCAmelCase , beta_a=__UpperCAmelCase , epsilon=__UpperCAmelCase , clipnorm=__UpperCAmelCase , global_clipnorm=__UpperCAmelCase , ) # We return the optimizer and the LR scheduler in order to better track the # evolution of the LR independently of the optimizer. 
return optimizer, lr_schedule class UpperCAmelCase_ ( _lowerCamelCase ): '''simple docstring''' def __init__( self , __A = 0.001 , __A = 0.9 , __A = 0.999 , __A = 1e-7 , __A = False , __A = 0.0 , __A = None , __A = None , __A = "AdamWeightDecay" , **__A , ): """simple docstring""" super().__init__(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ) lowerCamelCase : Any = weight_decay_rate lowerCamelCase : Optional[int] = include_in_weight_decay lowerCamelCase : Union[str, Any] = exclude_from_weight_decay @classmethod def _snake_case ( cls , __A ): """simple docstring""" lowerCamelCase : Optional[int] = {"WarmUp": WarmUp} return super(lowercase_ , cls ).from_config(lowercase_ , custom_objects=lowercase_ ) def _snake_case ( self , __A , __A , __A ): """simple docstring""" super(lowercase_ , self )._prepare_local(lowercase_ , lowercase_ , lowercase_ ) lowerCamelCase : List[Any] = tf.constant( self.weight_decay_rate , name="adam_weight_decay_rate" ) def _snake_case ( self , __A , __A , __A ): """simple docstring""" lowerCamelCase : Dict = self._do_use_weight_decay(var.name ) if do_decay: return var.assign_sub( learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"] , use_locking=self._use_locking , ) return tf.no_op() def _snake_case ( self , __A , __A=None , **__A ): """simple docstring""" lowerCamelCase , lowerCamelCase : int = list(zip(*lowercase_ ) ) return super(lowercase_ , self ).apply_gradients(zip(lowercase_ , lowercase_ ) , name=lowercase_ , **lowercase_ ) def _snake_case ( self , __A , __A , __A ): """simple docstring""" if apply_state is None: return self._decayed_lr_t[var_dtype], {} lowerCamelCase : str = apply_state or {} lowerCamelCase : Optional[Any] = apply_state.get((var_device, var_dtype) ) if coefficients is None: lowerCamelCase : Optional[int] = self._fallback_apply_state(lowercase_ , lowercase_ ) lowerCamelCase : Optional[int] = coefficients return coefficients["lr_t"], 
{"apply_state": apply_state} def _snake_case ( self , __A , __A , __A=None ): """simple docstring""" lowerCamelCase , lowerCamelCase : Optional[Any] = self._get_lr(var.device , var.dtype.base_dtype , lowercase_ ) lowerCamelCase : str = self._decay_weights_op(lowercase_ , lowercase_ , lowercase_ ) with tf.control_dependencies([decay] ): return super(lowercase_ , self )._resource_apply_dense(lowercase_ , lowercase_ , **lowercase_ ) def _snake_case ( self , __A , __A , __A , __A=None ): """simple docstring""" lowerCamelCase , lowerCamelCase : Any = self._get_lr(var.device , var.dtype.base_dtype , lowercase_ ) lowerCamelCase : Optional[Any] = self._decay_weights_op(lowercase_ , lowercase_ , lowercase_ ) with tf.control_dependencies([decay] ): return super(lowercase_ , self )._resource_apply_sparse(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : Optional[int] = super().get_config() config.update({"weight_decay_rate": self.weight_decay_rate} ) return config def _snake_case ( self , __A ): """simple docstring""" if self.weight_decay_rate == 0: return False if self._include_in_weight_decay: for r in self._include_in_weight_decay: if re.search(lowercase_ , lowercase_ ) is not None: return True if self._exclude_from_weight_decay: for r in self._exclude_from_weight_decay: if re.search(lowercase_ , lowercase_ ) is not None: return False return True class UpperCAmelCase_ ( _lowerCamelCase ): '''simple docstring''' def __init__( self ): """simple docstring""" lowerCamelCase : Optional[Any] = [] lowerCamelCase : int = None @property def _snake_case ( self ): """simple docstring""" if self._accum_steps is None: lowerCamelCase : int = tf.Variable( tf.constant(0 , dtype=tf.intaa ) , trainable=lowercase_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) return self._accum_steps.value() @property def _snake_case ( self ): """simple docstring""" if not 
self._gradients: raise ValueError("The accumulator should be called first to initialize the gradients" ) return [gradient.value() if gradient is not None else gradient for gradient in self._gradients] def __call__( self , __A ): """simple docstring""" if not self._gradients: lowerCamelCase : List[Any] = self.step # Create the step variable. self._gradients.extend( [ tf.Variable( tf.zeros_like(lowercase_ ) , trainable=lowercase_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) if gradient is not None else gradient for gradient in gradients ] ) if len(lowercase_ ) != len(self._gradients ): raise ValueError(F"""Expected {len(self._gradients )} gradients, but got {len(lowercase_ )}""" ) for accum_gradient, gradient in zip(self._gradients , lowercase_ ): if accum_gradient is not None and gradient is not None: accum_gradient.assign_add(lowercase_ ) self._accum_steps.assign_add(1 ) def _snake_case ( self ): """simple docstring""" if not self._gradients: return self._accum_steps.assign(0 ) for gradient in self._gradients: if gradient is not None: gradient.assign(tf.zeros_like(lowercase_ ) )
283
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a : str = logging.get_logger(__name__) a : str = { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json', 'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json', 'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json', # See all BigBird models at https://huggingface.co/models?filter=big_bird } class a ( _lowerCamelCase ): snake_case_ = "big_bird" def __init__( self : Union[str, Any] , lowercase_ : List[Any]=5_0358 , lowercase_ : Tuple=768 , lowercase_ : Dict=12 , lowercase_ : str=12 , lowercase_ : Tuple=3072 , lowercase_ : Any="gelu_new" , lowercase_ : Optional[Any]=0.1 , lowercase_ : List[Any]=0.1 , lowercase_ : List[Any]=4096 , lowercase_ : List[Any]=2 , lowercase_ : List[str]=0.02 , lowercase_ : Optional[int]=1e-12 , lowercase_ : Tuple=True , lowercase_ : Tuple=0 , lowercase_ : str=1 , lowercase_ : Union[str, Any]=2 , lowercase_ : Optional[Any]=66 , lowercase_ : Optional[int]="block_sparse" , lowercase_ : Any=True , lowercase_ : List[str]=False , lowercase_ : Any=64 , lowercase_ : Tuple=3 , lowercase_ : Tuple=None , **lowercase_ : Tuple , ): super().__init__( pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , sep_token_id=lowercase_ , **lowercase_ , ) snake_case_ = vocab_size snake_case_ = max_position_embeddings snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = initializer_range snake_case_ = type_vocab_size snake_case_ = layer_norm_eps snake_case_ = use_cache snake_case_ = 
rescale_embeddings snake_case_ = attention_type snake_case_ = use_bias snake_case_ = block_size snake_case_ = num_random_blocks snake_case_ = classifier_dropout class a ( _lowerCamelCase ): @property def A_ ( self : str ): if self.task == "multiple-choice": snake_case_ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: snake_case_ = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
56
0
class snake_case_ : '''simple docstring''' def __init__( self : Tuple , _UpperCamelCase : Union[str, Any] ) ->List[Any]: # we need a list not a string, so do something to change the type snake_case_ = arr.split(''',''' ) def snake_case__( self : Union[str, Any] ) ->Optional[Any]: snake_case_ = [int(self.array[0] )] * len(self.array ) snake_case_ = [int(self.array[0] )] * len(self.array ) for i in range(1 , len(self.array ) ): snake_case_ = max( int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) ) snake_case_ = max(sum_value[i] , rear[i - 1] ) return rear[len(self.array ) - 1] if __name__ == "__main__": lowerCAmelCase_ = input('''please input some numbers:''') lowerCAmelCase_ = SubArray(whole_array) lowerCAmelCase_ = array.solve_sub_array() print(('''the results is:''', re))
8
'''simple docstring''' import contextlib import os import sqlitea import pytest from datasets import Dataset, Features, Value from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> str: '''simple docstring''' assert isinstance(__UpperCAmelCase, __UpperCAmelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @require_sqlalchemy @pytest.mark.parametrize('''keep_in_memory''', [False, True] ) def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> int: '''simple docstring''' snake_case_ = tmp_path / '''cache''' snake_case_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): snake_case_ = SqlDatasetReader( '''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=__UpperCAmelCase, keep_in_memory=__UpperCAmelCase ).read() _check_sql_dataset(__UpperCAmelCase, __UpperCAmelCase ) @require_sqlalchemy @pytest.mark.parametrize( '''features''', [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ], ) def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> List[str]: '''simple docstring''' snake_case_ = tmp_path / '''cache''' snake_case_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} 
snake_case_ = features.copy() if features else default_expected_features snake_case_ = ( Features({feature: Value(__UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) snake_case_ = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, features=__UpperCAmelCase, cache_dir=__UpperCAmelCase ).read() _check_sql_dataset(__UpperCAmelCase, __UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase ) -> List[str]: '''simple docstring''' with contextlib.closing(sqlitea.connect(__UpperCAmelCase ) ) as con: snake_case_ = con.cursor() cur.execute('''SELECT * FROM dataset''' ) for row in cur: yield row @require_sqlalchemy def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> List[Any]: '''simple docstring''' snake_case_ = tmp_path / '''cache''' snake_case_ = os.path.join(__UpperCAmelCase, '''tmp.sql''' ) snake_case_ = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=__UpperCAmelCase ).read() SqlDatasetWriter(__UpperCAmelCase, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=1 ).write() snake_case_ = iter_sql_file(__UpperCAmelCase ) snake_case_ = iter_sql_file(__UpperCAmelCase ) for rowa, rowa in zip(__UpperCAmelCase, __UpperCAmelCase ): assert rowa == rowa @require_sqlalchemy def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> Any: '''simple docstring''' snake_case_ = tmp_path / '''cache''' snake_case_ = os.path.join(__UpperCAmelCase, '''tmp.sql''' ) snake_case_ = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=__UpperCAmelCase ).read() SqlDatasetWriter(__UpperCAmelCase, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=2 ).write() snake_case_ = iter_sql_file(__UpperCAmelCase ) snake_case_ = iter_sql_file(__UpperCAmelCase ) for rowa, rowa in zip(__UpperCAmelCase, __UpperCAmelCase ): assert rowa == rowa @require_sqlalchemy def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> 
List[str]: '''simple docstring''' snake_case_ = tmp_path / '''cache''' snake_case_ = os.path.join(__UpperCAmelCase, '''tmp.sql''' ) snake_case_ = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=__UpperCAmelCase ).read() with pytest.raises(__UpperCAmelCase ): SqlDatasetWriter(__UpperCAmelCase, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=0 ).write()
56
0
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging _snake_case = logging.get_logger(__name__) _snake_case = { 'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json', # See all Marian models at https://huggingface.co/models?filter=marian } class lowercase ( _lowerCamelCase ): _a = "marian" _a = ["past_key_values"] _a = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self , _a=5_8101 , _a=None , _a=1024 , _a=12 , _a=4096 , _a=16 , _a=12 , _a=4096 , _a=16 , _a=0.0 , _a=0.0 , _a=True , _a=True , _a="gelu" , _a=1024 , _a=0.1 , _a=0.0 , _a=0.0 , _a=0.02 , _a=5_8100 , _a=False , _a=5_8100 , _a=0 , _a=0 , _a=True , **_a , ) -> int: _A : Any = vocab_size _A : int = decoder_vocab_size or vocab_size _A : Optional[int] = max_position_embeddings _A : Tuple = d_model _A : Any = encoder_ffn_dim _A : List[str] = encoder_layers _A : Union[str, Any] = encoder_attention_heads _A : Tuple = decoder_ffn_dim _A : Optional[int] = decoder_layers _A : Any = decoder_attention_heads _A : Dict = dropout _A : Union[str, Any] = attention_dropout _A : Optional[Any] = activation_dropout _A : List[str] = activation_function _A : int = init_std _A : Union[str, Any] = encoder_layerdrop _A : Dict = decoder_layerdrop _A : List[Any] = use_cache _A : Optional[Any] = encoder_layers _A : Dict = scale_embedding # scale factor will be sqrt(d_model) if True _A : Union[str, Any] = share_encoder_decoder_embeddings super().__init__( pad_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , forced_eos_token_id=lowercase_ , **lowercase_ , ) class 
lowercase ( _lowerCamelCase ): @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs def a__ ( self ) -> Union[str, Any]: if self.task in ["default", "seq2seq-lm"]: _A : Dict = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: _A : List[Any] = {0: """batch"""} _A : Dict = {0: """batch""", 1: """past_decoder_sequence + sequence"""} else: _A : str = {0: """batch""", 1: """decoder_sequence"""} _A : int = {0: """batch""", 1: """decoder_sequence"""} if self.use_past: self.fill_with_past_key_values_(lowercase_ , direction="""inputs""" ) elif self.task == "causal-lm": # TODO: figure this case out. _A : List[Any] = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: _A , _A : List[str] = self.num_layers for i in range(lowercase_ ): _A : List[Any] = {0: """batch""", 2: """past_sequence + sequence"""} _A : int = {0: """batch""", 2: """past_sequence + sequence"""} else: _A : int = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}), ("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}), ] ) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def a__ ( self ) -> Union[str, Any]: if self.task in ["default", "seq2seq-lm"]: _A : Optional[Any] = super().outputs else: _A : List[Any] = super(lowercase_ , self ).outputs if self.use_past: _A , _A : List[str] = self.num_layers for i in range(lowercase_ ): _A : List[Any] = {0: """batch""", 2: """past_sequence + sequence"""} _A : int = {0: """batch""", 2: """past_sequence + sequence"""} return common_outputs def a__ ( self , 
_a , _a = -1 , _a = -1 , _a = False , _a = None , ) -> Dict: _A : Tuple = self._generate_dummy_inputs_for_encoder_and_decoder( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) # Generate decoder inputs _A : List[str] = seq_length if not self.use_past else 1 _A : Optional[Any] = self._generate_dummy_inputs_for_encoder_and_decoder( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) _A : List[Any] = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()} _A : str = dict(**lowercase_ , **lowercase_ ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch _A , _A : str = common_inputs["""input_ids"""].shape _A : List[str] = common_inputs["""decoder_input_ids"""].shape[1] _A , _A : Union[str, Any] = self.num_attention_heads _A : List[Any] = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) _A : str = decoder_seq_length + 3 _A : str = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) _A : Tuple = torch.cat( [common_inputs["""decoder_attention_mask"""], torch.ones(lowercase_ , lowercase_ )] , dim=1 ) _A : List[str] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered _A , _A : Optional[Any] = self.num_layers _A : List[str] = min(lowercase_ , lowercase_ ) _A : int = max(lowercase_ , lowercase_ ) - min_num_layers _A : str = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder""" for _ in range(lowercase_ ): common_inputs["past_key_values"].append( ( torch.zeros(lowercase_ ), torch.zeros(lowercase_ ), torch.zeros(lowercase_ ), torch.zeros(lowercase_ ), ) ) # TODO: test this. 
_A : List[str] = encoder_shape if remaining_side_name == """encoder""" else decoder_shape for _ in range(lowercase_ , lowercase_ ): common_inputs["past_key_values"].append((torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) ) return common_inputs def a__ ( self , _a , _a = -1 , _a = -1 , _a = False , _a = None , ) -> int: _A : str = self._generate_dummy_inputs_for_encoder_and_decoder( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch _A , _A : Optional[int] = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values _A : Any = seqlen + 2 _A , _A : List[str] = self.num_layers _A , _A : int = self.num_attention_heads _A : Dict = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) _A : Union[str, Any] = common_inputs["""attention_mask"""].dtype _A : List[Any] = torch.cat( [common_inputs["""attention_mask"""], torch.ones(lowercase_ , lowercase_ , dtype=lowercase_ )] , dim=1 ) _A : List[Any] = [ (torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) for _ in range(lowercase_ ) ] return common_inputs def a__ ( self , _a , _a = -1 , _a = -1 , _a = False , _a = None , ) -> Dict: # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _A : Any = compute_effective_axis_dimension( lowercase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _A : Dict = tokenizer.num_special_tokens_to_add(lowercase_ ) _A : str = compute_effective_axis_dimension( lowercase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase_ ) # Generate dummy inputs according to compute batch and sequence _A : Tuple = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size _A : Optional[Any] = dict(tokenizer(lowercase_ , return_tensors=lowercase_ ) ) return common_inputs def a__ ( self , _a , _a = -1 , _a = -1 , _a = False , _a = None , ) -> Any: if self.task in ["default", "seq2seq-lm"]: _A : List[str] = self._generate_dummy_inputs_for_default_and_seqaseq_lm( lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ ) else: _A : Union[str, Any] = self._generate_dummy_inputs_for_causal_lm( lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ ) return common_inputs def a__ ( self , _a , _a , _a , _a ) -> Tuple: if self.task in ["default", "seq2seq-lm"]: _A : List[str] = super()._flatten_past_key_values_(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) else: _A : Any = super(lowercase_ , self )._flatten_past_key_values_( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) @property def a__ ( self ) -> str: return 1e-4
26
'''simple docstring''' from collections import defaultdict def __magic_name__ ( __UpperCAmelCase ) -> int: '''simple docstring''' snake_case_ = 1 snake_case_ = True for v in tree[start]: if v not in visited: ret += dfs(__UpperCAmelCase ) if ret % 2 == 0: cuts.append(__UpperCAmelCase ) return ret def __magic_name__ ( ) -> Union[str, Any]: '''simple docstring''' dfs(1 ) if __name__ == "__main__": a ,a : Dict = 10, 9 a : Dict = defaultdict(list) a : dict[int, bool] = {} a : list[int] = [] a : Tuple = 0 a : str = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)] for u, v in edges: tree[u].append(v) tree[v].append(u) even_tree() print(len(cuts) - 1)
56
0
"""simple docstring""" def lowercase ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Tuple ) -> str: if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): raise ValueError('''iterations must be defined as integers''' ) if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or not number >= 1: raise ValueError( '''starting number must be and integer and be more than 0''' ) if not iterations >= 1: raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' ) __a = '''''' while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(__UpperCAmelCase ) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
45
'''simple docstring''' import math from collections.abc import Callable def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> float: '''simple docstring''' snake_case_ = xa snake_case_ = xa while True: if x_n == x_na or function(__UpperCAmelCase ) == function(__UpperCAmelCase ): raise ZeroDivisionError('''float division by zero, could not find root''' ) snake_case_ = x_na - ( function(__UpperCAmelCase ) / ((function(__UpperCAmelCase ) - function(__UpperCAmelCase )) / (x_na - x_n)) ) if abs(x_na - x_na ) < 10**-5: return x_na snake_case_ = x_na snake_case_ = x_na def __magic_name__ ( __UpperCAmelCase ) -> float: '''simple docstring''' return math.pow(__UpperCAmelCase, 3 ) - (2 * x) - 5 if __name__ == "__main__": print(intersection(f, 3, 3.5))
56
0
a ={ 'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.', 'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.', 'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-', 'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----', '2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...', '8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.', ':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.', '?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-', '(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/' } # Exclamation mark is not in ITU-R recommendation # fmt: on a ={value: key for key, value in MORSE_CODE_DICT.items()} def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str: return " ".join(MORSE_CODE_DICT[char] for char in message.upper() ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str: return "".join(REVERSE_DICT[char] for char in message.split() ) def SCREAMING_SNAKE_CASE__ ( ) -> None: __lowerCamelCase : Optional[int] = 'Morse code here!' print(__UpperCAmelCase ) __lowerCamelCase : Tuple = encrypt(__UpperCAmelCase ) print(__UpperCAmelCase ) __lowerCamelCase : List[str] = decrypt(__UpperCAmelCase ) print(__UpperCAmelCase ) if __name__ == "__main__": main()
73
"""Convert DPT checkpoints from the original repository (https://github.com/intel-isl/DPT) to Hugging Face format.

NOTE(review): recovered from a mechanically-obfuscated copy in which every def
was named ``__magic_name__`` and every local ``snake_case_``, while call sites
still referenced the real names (``get_dpt_config``, ``rename_key``, ...), so
the script could not run.  The names below restore a self-consistent script.
"""
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image

from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    """Build a DPTConfig matching ``checkpoint_url`` and return (config, expected output shape)."""
    config = DPTConfig()
    # BUGFIX: default the expected shape so non-"large", non-"ade" checkpoints
    # (which previously left it unbound) still return a usable value.
    expected_shape = (1, 384, 384)
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
    if "ade" in checkpoint_url:
        # Semantic-segmentation head fine-tuned on ADE20k (150 classes).
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape


def remove_ignore_keys_(state_dict):
    """Drop the pretraining-head weights, which have no counterpart in the HF model (in place)."""
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(name):
    """Map one original DPT state-dict key to its Hugging Face equivalent."""
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    return name


def read_in_q_k_v(state_dict, config):
    """Split each timm-style fused qkv matrix into separate query/key/value entries (in place)."""
    # NOTE(review): the destination key names below follow the upstream HF DPT
    # conversion script; the obfuscated copy had lost them entirely.
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def prepare_img():
    """Download the standard COCO cats test image used to verify conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    """Download, convert, sanity-check and save (optionally push) a DPT checkpoint."""
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        type=str,
        help="URL of the original DPT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    parser.add_argument(
        "--model_name",
        default="dpt-large",
        type=str,
        help="Name of the model, in case you're pushing to the hub.",
    )

    args = parser.parse_args()
    convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
56
0
"""Convert raw LLaMA checkpoints (params.json + consolidated.*.pth shards) to Hugging Face format.

NOTE(review): recovered from a mechanically-obfuscated copy in which every
local was named ``__UpperCamelCase`` (each assignment immediately clobbering
the previous one), so no binding that later code relied on (``n_layers``,
``state_dict``, ``index_dict``, ...) actually existed and the script could not
run.  The bindings below restore a self-consistent script.
"""
import argparse
import gc
import json
import os
import shutil
import warnings

import torch

from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer


try:
    from transformers import LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
    )
    LlamaTokenizerFast = None

# Hidden size of the MLP per model size (kept for reference; the actual value
# is recomputed from params.json via compute_intermediate_size).
INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
# Number of consolidated.*.pth shard files per model size.
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}


def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    """Replicate LLaMA's FFN sizing rule: round 8n/3 (scaled) up to a multiple of ``multiple_of``."""
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)


def read_json(path):
    """Load a JSON file from ``path``."""
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    """Dump ``text`` as JSON to ``path``."""
    with open(path, "w") as f:
        json.dump(text, f)


def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    """Stitch the raw shards into HF-layout weight files, then re-save through LlamaForCausalLM."""
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))

    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)

    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)
            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )

        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))

    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
            ),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }

    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))

    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path

    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)


def write_tokenizer(tokenizer_path, input_tokenizer_path):
    """Convert the sentencepiece tokenizer, preferring the fast implementation when available."""
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)


def main():
    """CLI entry point: convert weights and/or tokenizer depending on --model_size."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)


if __name__ == "__main__":
    main()
328
'''simple docstring''' import re def __magic_name__ ( __UpperCAmelCase ) -> bool: '''simple docstring''' snake_case_ = re.compile( r'''^(?:0|94|\+94|0{2}94)''' r'''7(0|1|2|4|5|6|7|8)''' r'''(-| |)''' r'''\d{7}$''' ) return bool(re.search(__UpperCAmelCase, __UpperCAmelCase ) ) if __name__ == "__main__": a : Any = '0094702343221' print(is_sri_lankan_phone_number(phone))
56
0
"""simple docstring""" from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def UpperCAmelCase__ ( ) -> List[Any]: '''simple docstring''' lowercase = HfArgumentParser(__UpperCAmelCase ) lowercase = parser.parse_args_into_dataclasses()[0] lowercase = TensorFlowBenchmark(args=__UpperCAmelCase ) try: lowercase = parser.parse_args_into_dataclasses()[0] except ValueError as e: lowercase = """Arg --no_{0} is no longer used, please use --no-{0} instead.""" lowercase = """ """.join(str(__UpperCAmelCase ).split(""" """ )[:-1] ) lowercase = """""" lowercase = eval(str(__UpperCAmelCase ).split(""" """ )[-1] ) lowercase = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(__UpperCAmelCase ) if len(__UpperCAmelCase ) > 0: lowercase = full_error_msg + begin_error_msg + str(__UpperCAmelCase ) raise ValueError(__UpperCAmelCase ) benchmark.run() if __name__ == "__main__": main()
197
'''simple docstring''' import re from filelock import FileLock try: import nltk a : Union[str, Any] = True except (ImportError, ModuleNotFoundError): a : Any = False if NLTK_AVAILABLE: with FileLock('.lock') as lock: nltk.download('punkt', quiet=True) def __magic_name__ ( __UpperCAmelCase ) -> str: '''simple docstring''' re.sub('''<n>''', '''''', __UpperCAmelCase ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(__UpperCAmelCase ) )
56
0