import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List

from ..utils import logging


logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class BenchmarkArguments:
    """
    Base class for the arguments used by the benchmark scripts.
    """

    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
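As a usage sketch, the PyTorch subclass that transformers exports can be instantiated like this; the model choice here is illustrative:

from transformers import PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["bert-base-cased"], batch_sizes=[8], sequence_lengths=[8, 32]
)
print(args.model_names)       # -> ['bert-base-cased']
print(args.to_json_string())  # full configuration serialized as indented JSON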
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"


class FNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
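A brief usage sketch of the two pair-building helpers above, assuming the `google/fnet-base` checkpoint is reachable:

from transformers import FNetTokenizerFast

tokenizer = FNetTokenizerFast.from_pretrained("google/fnet-base")
ids_a = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("hello world"))
ids_b = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("how are you"))
# Layout is [CLS] A [SEP] B [SEP]; type ids are 0 for the first segment, 1 for the second
input_ids = tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)
type_ids = tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b)
assert len(input_ids) == len(type_ids)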
import os
import tempfile
import unittest
from pathlib import Path

from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf


if is_tf_available():
    import tensorflow as tf

    from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments


@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            eager_mode=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            eager_mode=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            use_xla=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                eager_mode=True,
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
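Outside of unittest, the same benchmark can be driven directly; this sketch mirrors the eager-mode test above:

from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"], training=False, inference=True,
    sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False,
)
results = TensorFlowBenchmark(args).run()
print(results.time_inference_result)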
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

from transformers import (
    BitConfig,
    ViTHybridConfig,
    ViTHybridForImageClassification,
    ViTHybridImageProcessor,
    ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    rename_keys = []

    # fmt: off
    # stem:
    rename_keys.append(("cls_token", "vit.embeddings.cls_token"))
    rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings"))

    rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"))

    # backbone
    rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight"))
    rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight"))
    rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias"))

    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias"))
        rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight"))
        rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight"))
        rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias"))

    # transformer encoder
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )
    # fmt: on

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    # an image of two cats, commonly used for conversion sanity checks
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_r50_s16_384",
        type=str,
        help="Name of the hybrid ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )

    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
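If driven from Python rather than the CLI, the call might look like the sketch below; the timm model name is the script's own default, while the dump folder name is hypothetical:

convert_vit_checkpoint(
    vit_name="vit_base_r50_s16_384",
    pytorch_dump_folder_path="./vit-hybrid-base-bit-384",
    push_to_hub=False,
)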
import argparse
import json

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification


def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swin_name",
        default="swin_tiny_patch4_window7_224",
        type=str,
        help="Name of the Swin timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
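Called directly from Python, the conversion might look like this sketch (the output folder name is hypothetical, the model name is the script's default):

convert_swin_checkpoint("swin_tiny_patch4_window7_224", "./swin-tiny-patch4-window7-224")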
import argparse
import json
import os

import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile

from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
    rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int


def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """
    Post renaming of basic JAX keys to pytorch.
    """
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor


def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer[-1] = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content


def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)


def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin"
        )
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
    parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )


def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."

    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    """
    A pangram contains every letter of the alphabet at least once.
    """
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - 97] = True
        elif char.isupper():
            flag[ord(char) - 65] = True
    return all(flag)


def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """
    Benchmark code comparing the different versions.
    """
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
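A quick usage sketch for the three variants (the last sentence is another classic pangram):

print(is_pangram())                 # True for the default sentence
print(is_pangram("Hello, world!"))  # False: most letters are missing
print(is_pangram_fastest("Pack my box with five dozen liquor jugs"))  # True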
import math
import os
from copy import deepcopy

import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer

from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed


os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"


def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Returns everything needed to perform basic training."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
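A single-process smoke run of the torch-metrics check might look like this sketch; exercising the distributed paths requires launching the script through `accelerate launch` instead:

from accelerate import Accelerator

accelerator = Accelerator()
test_torch_metrics(accelerator, num_samples=99)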
from typing import Any


def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
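A usage sketch on the classic healthy/fever hidden Markov model (the numbers are the textbook example, not from this file):

observations = ["normal", "cold", "dizzy"]
states = ["Healthy", "Fever"]
start_p = {"Healthy": 0.6, "Fever": 0.4}
trans_p = {
    "Healthy": {"Healthy": 0.7, "Fever": 0.3},
    "Fever": {"Healthy": 0.4, "Fever": 0.6},
}
emit_p = {
    "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}
print(viterbi(observations, states, start_p, trans_p, emit_p))
# -> ['Healthy', 'Healthy', 'Fever']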
from dataclasses import dataclass
from typing import Optional

import numpy as np
import torch
import torch.nn as nn

from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block


@dataclass
class DecoderOutput(BaseOutput):
    """
    Output of decoding method.
    """

    sample: torch.FloatTensor


class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(
            in_channels,
            block_out_channels[0],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)

        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)

            # middle
UpperCAmelCase__ : Optional[Any] = self.mid_block(snake_case__ ) # post-process UpperCAmelCase__ : Tuple = self.conv_norm_out(snake_case__ ) UpperCAmelCase__ : List[Any] = self.conv_act(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = self.conv_out(snake_case__ ) return sample class UpperCAmelCase_ ( nn.Module ): '''simple docstring''' def __init__( self : str , snake_case__ : int=3 , snake_case__ : str=3 , snake_case__ : Union[str, Any]=("UpDecoderBlock2D",) , snake_case__ : Dict=(64,) , snake_case__ : Optional[Any]=2 , snake_case__ : Dict=32 , snake_case__ : str="silu" , snake_case__ : Any="group" , ): '''simple docstring''' super().__init__() UpperCAmelCase__ : Any = layers_per_block UpperCAmelCase__ : Any = nn.Convad( snake_case__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , ) UpperCAmelCase__ : str = None UpperCAmelCase__ : Optional[int] = nn.ModuleList([] ) UpperCAmelCase__ : str = in_channels if norm_type == "spatial" else None # mid UpperCAmelCase__ : Optional[Any] = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=snake_case__ , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=snake_case__ , temb_channels=snake_case__ , ) # up UpperCAmelCase__ : Tuple = list(reversed(snake_case__ ) ) UpperCAmelCase__ : Optional[Any] = reversed_block_out_channels[0] for i, up_block_type in enumerate(snake_case__ ): UpperCAmelCase__ : Dict = output_channel UpperCAmelCase__ : List[Any] = reversed_block_out_channels[i] UpperCAmelCase__ : List[Any] = i == len(snake_case__ ) - 1 UpperCAmelCase__ : Tuple = get_up_block( snake_case__ , num_layers=self.layers_per_block + 1 , in_channels=snake_case__ , out_channels=snake_case__ , prev_output_channel=snake_case__ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=snake_case__ , resnet_groups=snake_case__ , attention_head_dim=snake_case__ , temb_channels=snake_case__ , resnet_time_scale_shift=snake_case__ , ) self.up_blocks.append(snake_case__ ) UpperCAmelCase__ : str = output_channel # out if norm_type == "spatial": UpperCAmelCase__ : Optional[Any] = SpatialNorm(block_out_channels[0] , snake_case__ ) else: UpperCAmelCase__ : Optional[int] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=snake_case__ , eps=1e-6 ) UpperCAmelCase__ : Dict = nn.SiLU() UpperCAmelCase__ : Union[str, Any] = nn.Convad(block_out_channels[0] , snake_case__ , 3 , padding=1 ) UpperCAmelCase__ : Union[str, Any] = False def UpperCamelCase ( self : List[str] , snake_case__ : List[str] , snake_case__ : Tuple=None ): '''simple docstring''' UpperCAmelCase__ : str = z UpperCAmelCase__ : List[str] = self.conv_in(snake_case__ ) UpperCAmelCase__ : Tuple = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(snake_case__ : Dict ): def custom_forward(*snake_case__ : List[Any] ): return module(*snake_case__ ) return custom_forward if is_torch_version(">=" , "1.11.0" ): # middle UpperCAmelCase__ : str = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , snake_case__ , snake_case__ , use_reentrant=snake_case__ ) UpperCAmelCase__ : List[Any] = sample.to(snake_case__ ) # up for up_block in self.up_blocks: UpperCAmelCase__ : Tuple = torch.utils.checkpoint.checkpoint( create_custom_forward(snake_case__ ) , snake_case__ , snake_case__ , use_reentrant=snake_case__ ) else: # middle UpperCAmelCase__ : List[Any] = 
torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , snake_case__ , snake_case__ ) UpperCAmelCase__ : int = sample.to(snake_case__ ) # up for up_block in self.up_blocks: UpperCAmelCase__ : str = torch.utils.checkpoint.checkpoint(create_custom_forward(snake_case__ ) , snake_case__ , snake_case__ ) else: # middle UpperCAmelCase__ : Union[str, Any] = self.mid_block(snake_case__ , snake_case__ ) UpperCAmelCase__ : Optional[Any] = sample.to(snake_case__ ) # up for up_block in self.up_blocks: UpperCAmelCase__ : int = up_block(snake_case__ , snake_case__ ) # post-process if latent_embeds is None: UpperCAmelCase__ : List[Any] = self.conv_norm_out(snake_case__ ) else: UpperCAmelCase__ : Any = self.conv_norm_out(snake_case__ , snake_case__ ) UpperCAmelCase__ : List[Any] = self.conv_act(snake_case__ ) UpperCAmelCase__ : Optional[Any] = self.conv_out(snake_case__ ) return sample class UpperCAmelCase_ ( nn.Module ): '''simple docstring''' def __init__( self : str , snake_case__ : str , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Union[str, Any]=None , snake_case__ : Optional[Any]="random" , snake_case__ : Any=False , snake_case__ : Any=True ): '''simple docstring''' super().__init__() UpperCAmelCase__ : Any = n_e UpperCAmelCase__ : str = vq_embed_dim UpperCAmelCase__ : List[Any] = beta UpperCAmelCase__ : List[Any] = legacy UpperCAmelCase__ : Tuple = nn.Embedding(self.n_e , self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e ) UpperCAmelCase__ : Optional[Any] = remap if self.remap is not None: self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) ) UpperCAmelCase__ : Optional[Any] = self.used.shape[0] UpperCAmelCase__ : Any = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": UpperCAmelCase__ : Union[str, Any] = self.re_embed UpperCAmelCase__ : Optional[Any] = self.re_embed + 1 print( F"""Remapping {self.n_e} indices to {self.re_embed} indices. 
""" F"""Using {self.unknown_index} for unknown indices.""" ) else: UpperCAmelCase__ : int = n_e UpperCAmelCase__ : Dict = sane_index_shape def UpperCamelCase ( self : Tuple , snake_case__ : int ): '''simple docstring''' UpperCAmelCase__ : Any = inds.shape assert len(snake_case__ ) > 1 UpperCAmelCase__ : Tuple = inds.reshape(ishape[0] , -1 ) UpperCAmelCase__ : Optional[Any] = self.used.to(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = (inds[:, :, None] == used[None, None, ...]).long() UpperCAmelCase__ : Optional[int] = match.argmax(-1 ) UpperCAmelCase__ : str = match.sum(2 ) < 1 if self.unknown_index == "random": UpperCAmelCase__ : Union[str, Any] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device ) else: UpperCAmelCase__ : Tuple = self.unknown_index return new.reshape(snake_case__ ) def UpperCamelCase ( self : int , snake_case__ : str ): '''simple docstring''' UpperCAmelCase__ : List[str] = inds.shape assert len(snake_case__ ) > 1 UpperCAmelCase__ : List[Any] = inds.reshape(ishape[0] , -1 ) UpperCAmelCase__ : int = self.used.to(snake_case__ ) if self.re_embed > self.used.shape[0]: # extra token UpperCAmelCase__ : Any = 0 # simply set to zero UpperCAmelCase__ : List[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , snake_case__ ) return back.reshape(snake_case__ ) def UpperCamelCase ( self : Any , snake_case__ : Any ): '''simple docstring''' UpperCAmelCase__ : List[str] = z.permute(0 , 2 , 3 , 1 ).contiguous() UpperCAmelCase__ : str = z.view(-1 , self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z UpperCAmelCase__ : Dict = torch.argmin(torch.cdist(snake_case__ , self.embedding.weight ) , dim=1 ) UpperCAmelCase__ : Any = self.embedding(snake_case__ ).view(z.shape ) UpperCAmelCase__ : Union[str, Any] = None UpperCAmelCase__ : Any = None # compute loss for embedding if not self.legacy: UpperCAmelCase__ : Optional[int] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: UpperCAmelCase__ : Tuple = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients UpperCAmelCase__ : Any = z + (z_q - z).detach() # reshape back to match original input shape UpperCAmelCase__ : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous() if self.remap is not None: UpperCAmelCase__ : Dict = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis UpperCAmelCase__ : Optional[int] = self.remap_to_used(snake_case__ ) UpperCAmelCase__ : Optional[Any] = min_encoding_indices.reshape(-1 , 1 ) # flatten if self.sane_index_shape: UpperCAmelCase__ : Union[str, Any] = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def UpperCamelCase ( self : int , snake_case__ : str , snake_case__ : int ): '''simple docstring''' if self.remap is not None: UpperCAmelCase__ : List[Any] = indices.reshape(shape[0] , -1 ) # add batch axis UpperCAmelCase__ : Dict = self.unmap_to_all(snake_case__ ) UpperCAmelCase__ : Dict = indices.reshape(-1 ) # flatten again # get quantized latent vectors UpperCAmelCase__ : Optional[Any] = self.embedding(snake_case__ ) if shape is not None: UpperCAmelCase__ : List[str] = z_q.view(snake_case__ ) # reshape back to match original input shape UpperCAmelCase__ : List[Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous() return z_q class UpperCAmelCase_ ( A ): '''simple docstring''' def __init__( self : int , snake_case__ : Optional[Any] , snake_case__ : 
List[str]=False ): '''simple docstring''' UpperCAmelCase__ : Dict = parameters UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = torch.chunk(snake_case__ , 2 , dim=1 ) UpperCAmelCase__ : int = torch.clamp(self.logvar , -30.0 , 20.0 ) UpperCAmelCase__ : Optional[Any] = deterministic UpperCAmelCase__ : Dict = torch.exp(0.5 * self.logvar ) UpperCAmelCase__ : List[str] = torch.exp(self.logvar ) if self.deterministic: UpperCAmelCase__ : Dict = torch.zeros_like( self.mean , device=self.parameters.device , dtype=self.parameters.dtype ) def UpperCamelCase ( self : List[Any] , snake_case__ : Optional[torch.Generator] = None ): '''simple docstring''' UpperCAmelCase__ : int = randn_tensor( self.mean.shape , generator=snake_case__ , device=self.parameters.device , dtype=self.parameters.dtype ) UpperCAmelCase__ : List[str] = self.mean + self.std * sample return x def UpperCamelCase ( self : Optional[Any] , snake_case__ : Any=None ): '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean , 2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar , dim=[1, 2, 3] , ) def UpperCamelCase ( self : List[Any] , snake_case__ : Tuple , snake_case__ : Tuple=[1, 2, 3] ): '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) UpperCAmelCase__ : str = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=snake_case__ ) def UpperCamelCase ( self : List[str] ): '''simple docstring''' return self.mean
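# A self-contained sketch of the reparameterization trick used by the
# DiagonalGaussianDistribution.sample() method above: x = mean + std * eps with
# eps ~ N(0, I). Tensor shapes and the 2*C channel split are assumptions chosen
# only for illustration.
import torch

params = torch.randn(2, 8, 4, 4)              # e.g. an encoder output with 2*C channels
mean, logvar = torch.chunk(params, 2, dim=1)  # split into mean and log-variance
logvar = torch.clamp(logvar, -30.0, 20.0)     # same numerical clamp as above
std = torch.exp(0.5 * logvar)

eps = torch.randn_like(mean)
sample = mean + std * eps                     # differentiable w.r.t. mean and std

# Closed-form KL(N(mean, var) || N(0, I)), matching kl() above with other=None:
kl = 0.5 * torch.sum(mean.pow(2) + std.pow(2) - 1.0 - logvar, dim=[1, 2, 3])
print(sample.shape, kl.shape)  # torch.Size([2, 4, 4, 4]) torch.Size([2])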
199
1
import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_gpu, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class A__ ( unittest.TestCase ): lowerCamelCase__ : Union[str, Any] =MODEL_FOR_MASKED_LM_MAPPING lowerCamelCase__ : List[Any] =TF_MODEL_FOR_MASKED_LM_MAPPING def lowercase ( self ) -> Optional[int]: """simple docstring""" super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): import torch torch.cuda.empty_cache() @require_tf def lowercase ( self ) -> str: """simple docstring""" __magic_name__ : List[Any] = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''tf''' ) __magic_name__ : Dict = unmasker('''My name is <mask>''' ) self.assertEqual( nested_simplify(lowerCamelCase , decimals=6 ) , [ {'''sequence''': '''My name is grouped''', '''score''': 2.1e-05, '''token''': 38015, '''token_str''': ''' grouped'''}, {'''sequence''': '''My name is accuser''', '''score''': 2.1e-05, '''token''': 25506, '''token_str''': ''' accuser'''}, ] , ) __magic_name__ : List[str] = unmasker('''The largest city in France is <mask>''' ) self.assertEqual( nested_simplify(lowerCamelCase , decimals=6 ) , [ { '''sequence''': '''The largest city in France is grouped''', '''score''': 2.1e-05, '''token''': 38015, '''token_str''': ''' grouped''', }, { '''sequence''': '''The largest city in France is accuser''', '''score''': 2.1e-05, '''token''': 25506, '''token_str''': ''' accuser''', }, ] , ) __magic_name__ : Optional[Any] = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 ) self.assertEqual( nested_simplify(lowerCamelCase , decimals=6 ) , [ {'''sequence''': '''My name is Clara''', '''score''': 2e-05, '''token''': 13606, '''token_str''': ''' Clara'''}, {'''sequence''': '''My name is Patrick''', '''score''': 2e-05, '''token''': 3499, '''token_str''': ''' Patrick'''}, {'''sequence''': '''My name is Te''', '''score''': 1.9e-05, '''token''': 2941, '''token_str''': ''' Te'''}, ] , ) @require_torch def lowercase ( self ) -> List[str]: """simple docstring""" __magic_name__ : str = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''pt''' ) __magic_name__ : List[Any] = unmasker('''My name is <mask>''' ) self.assertEqual( nested_simplify(lowerCamelCase , decimals=6 ) , [ {'''sequence''': '''My name is Maul''', '''score''': 2.2e-05, '''token''': 35676, '''token_str''': ''' Maul'''}, {'''sequence''': '''My name isELS''', '''score''': 2.2e-05, '''token''': 16416, '''token_str''': '''ELS'''}, ] , ) __magic_name__ : Any = unmasker('''The largest city in France is <mask>''' ) self.assertEqual( nested_simplify(lowerCamelCase , decimals=6 ) , [ { '''sequence''': '''The largest city in France is Maul''', '''score''': 2.2e-05, '''token''': 35676, '''token_str''': ''' Maul''', }, {'''sequence''': '''The largest city in France isELS''', '''score''': 2.2e-05, '''token''': 16416, '''token_str''': '''ELS'''}, ] , ) __magic_name__ : Optional[int] = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 ) self.assertEqual( nested_simplify(lowerCamelCase , decimals=6 ) , [ {'''sequence''': '''My name is 
Patrick''', '''score''': 2.1e-05, '''token''': 3499, '''token_str''': ''' Patrick'''}, {'''sequence''': '''My name is Te''', '''score''': 2e-05, '''token''': 2941, '''token_str''': ''' Te'''}, {'''sequence''': '''My name is Clara''', '''score''': 2e-05, '''token''': 13606, '''token_str''': ''' Clara'''}, ] , ) __magic_name__ : Tuple = unmasker('''My name is <mask> <mask>''' , top_k=2 ) self.assertEqual( nested_simplify(lowerCamelCase , decimals=6 ) , [ [ { '''score''': 2.2e-05, '''token''': 35676, '''token_str''': ''' Maul''', '''sequence''': '''<s>My name is Maul<mask></s>''', }, {'''score''': 2.2e-05, '''token''': 16416, '''token_str''': '''ELS''', '''sequence''': '''<s>My name isELS<mask></s>'''}, ], [ { '''score''': 2.2e-05, '''token''': 35676, '''token_str''': ''' Maul''', '''sequence''': '''<s>My name is<mask> Maul</s>''', }, {'''score''': 2.2e-05, '''token''': 16416, '''token_str''': '''ELS''', '''sequence''': '''<s>My name is<mask>ELS</s>'''}, ], ] , ) @require_torch_gpu def lowercase ( self ) -> str: """simple docstring""" __magic_name__ : str = pipeline('''fill-mask''' , model='''hf-internal-testing/tiny-random-distilbert''' , device=0 , framework='''pt''' ) # convert model to fp16 pipe.model.half() __magic_name__ : Optional[Any] = pipe('''Paris is the [MASK] of France.''' ) # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(lowerCamelCase , lowerCamelCase ) @slow @require_torch def lowercase ( self ) -> List[str]: """simple docstring""" __magic_name__ : Union[str, Any] = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''pt''' ) self.run_large_test(lowerCamelCase ) @slow @require_tf def lowercase ( self ) -> List[str]: """simple docstring""" __magic_name__ : Tuple = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''tf''' ) self.run_large_test(lowerCamelCase ) def lowercase ( self , lowerCamelCase ) -> Optional[int]: """simple docstring""" __magic_name__ : int = unmasker('''My name is <mask>''' ) self.assertEqual( nested_simplify(lowerCamelCase ) , [ {'''sequence''': '''My name is John''', '''score''': 0.0_0_8, '''token''': 610, '''token_str''': ''' John'''}, {'''sequence''': '''My name is Chris''', '''score''': 0.0_0_7, '''token''': 1573, '''token_str''': ''' Chris'''}, ] , ) __magic_name__ : int = unmasker('''The largest city in France is <mask>''' ) self.assertEqual( nested_simplify(lowerCamelCase ) , [ { '''sequence''': '''The largest city in France is Paris''', '''score''': 0.2_5_1, '''token''': 2201, '''token_str''': ''' Paris''', }, { '''sequence''': '''The largest city in France is Lyon''', '''score''': 0.2_1_4, '''token''': 12790, '''token_str''': ''' Lyon''', }, ] , ) __magic_name__ : Tuple = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 ) self.assertEqual( nested_simplify(lowerCamelCase ) , [ {'''sequence''': '''My name is Patrick''', '''score''': 0.0_0_5, '''token''': 3499, '''token_str''': ''' Patrick'''}, {'''sequence''': '''My name is Clara''', '''score''': 0.0_0_0, '''token''': 13606, '''token_str''': ''' Clara'''}, {'''sequence''': '''My name is Te''', '''score''': 0.0_0_0, '''token''': 2941, '''token_str''': ''' Te'''}, ] , ) @require_torch def lowercase ( self ) -> Any: """simple docstring""" __magic_name__ : List[str] = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , 
framework='''pt''' ) __magic_name__ : str = None __magic_name__ : int = None self.run_pipeline_test(lowerCamelCase , [] ) @require_tf def lowercase ( self ) -> Tuple: """simple docstring""" __magic_name__ : Any = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''tf''' ) __magic_name__ : Optional[int] = None __magic_name__ : Any = None self.run_pipeline_test(lowerCamelCase , [] ) def lowercase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Any: """simple docstring""" if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest('''The provided tokenizer has no mask token, (probably reformer or wav2vec2)''' ) __magic_name__ : Dict = FillMaskPipeline(model=lowerCamelCase , tokenizer=lowerCamelCase ) __magic_name__ : Dict = [ F'''This is another {tokenizer.mask_token} test''', ] return fill_masker, examples def lowercase ( self , lowerCamelCase , lowerCamelCase ) -> List[Any]: """simple docstring""" __magic_name__ : List[Any] = fill_masker.tokenizer __magic_name__ : List[str] = fill_masker.model __magic_name__ : Dict = fill_masker( F'''This is a {tokenizer.mask_token}''' , ) self.assertEqual( lowerCamelCase , [ {'''sequence''': ANY(lowerCamelCase ), '''score''': ANY(lowerCamelCase ), '''token''': ANY(lowerCamelCase ), '''token_str''': ANY(lowerCamelCase )}, {'''sequence''': ANY(lowerCamelCase ), '''score''': ANY(lowerCamelCase ), '''token''': ANY(lowerCamelCase ), '''token_str''': ANY(lowerCamelCase )}, {'''sequence''': ANY(lowerCamelCase ), '''score''': ANY(lowerCamelCase ), '''token''': ANY(lowerCamelCase ), '''token_str''': ANY(lowerCamelCase )}, {'''sequence''': ANY(lowerCamelCase ), '''score''': ANY(lowerCamelCase ), '''token''': ANY(lowerCamelCase ), '''token_str''': ANY(lowerCamelCase )}, {'''sequence''': ANY(lowerCamelCase ), '''score''': ANY(lowerCamelCase ), '''token''': ANY(lowerCamelCase ), '''token_str''': ANY(lowerCamelCase )}, ] , ) __magic_name__ : Tuple = fill_masker([F'''This is a {tokenizer.mask_token}'''] ) self.assertEqual( lowerCamelCase , [ {'''sequence''': ANY(lowerCamelCase ), '''score''': ANY(lowerCamelCase ), '''token''': ANY(lowerCamelCase ), '''token_str''': ANY(lowerCamelCase )}, {'''sequence''': ANY(lowerCamelCase ), '''score''': ANY(lowerCamelCase ), '''token''': ANY(lowerCamelCase ), '''token_str''': ANY(lowerCamelCase )}, {'''sequence''': ANY(lowerCamelCase ), '''score''': ANY(lowerCamelCase ), '''token''': ANY(lowerCamelCase ), '''token_str''': ANY(lowerCamelCase )}, {'''sequence''': ANY(lowerCamelCase ), '''score''': ANY(lowerCamelCase ), '''token''': ANY(lowerCamelCase ), '''token_str''': ANY(lowerCamelCase )}, {'''sequence''': ANY(lowerCamelCase ), '''score''': ANY(lowerCamelCase ), '''token''': ANY(lowerCamelCase ), '''token_str''': ANY(lowerCamelCase )}, ] , ) __magic_name__ : Dict = fill_masker([F'''This is a {tokenizer.mask_token}''', F'''Another {tokenizer.mask_token} great test.'''] ) self.assertEqual( lowerCamelCase , [ [ {'''sequence''': ANY(lowerCamelCase ), '''score''': ANY(lowerCamelCase ), '''token''': ANY(lowerCamelCase ), '''token_str''': ANY(lowerCamelCase )}, {'''sequence''': ANY(lowerCamelCase ), '''score''': ANY(lowerCamelCase ), '''token''': ANY(lowerCamelCase ), '''token_str''': ANY(lowerCamelCase )}, {'''sequence''': ANY(lowerCamelCase ), '''score''': ANY(lowerCamelCase ), '''token''': ANY(lowerCamelCase ), '''token_str''': ANY(lowerCamelCase )}, {'''sequence''': ANY(lowerCamelCase ), '''score''': ANY(lowerCamelCase ), '''token''': ANY(lowerCamelCase ), 
'''token_str''': ANY(lowerCamelCase )}, {'''sequence''': ANY(lowerCamelCase ), '''score''': ANY(lowerCamelCase ), '''token''': ANY(lowerCamelCase ), '''token_str''': ANY(lowerCamelCase )}, ], [ {'''sequence''': ANY(lowerCamelCase ), '''score''': ANY(lowerCamelCase ), '''token''': ANY(lowerCamelCase ), '''token_str''': ANY(lowerCamelCase )}, {'''sequence''': ANY(lowerCamelCase ), '''score''': ANY(lowerCamelCase ), '''token''': ANY(lowerCamelCase ), '''token_str''': ANY(lowerCamelCase )}, {'''sequence''': ANY(lowerCamelCase ), '''score''': ANY(lowerCamelCase ), '''token''': ANY(lowerCamelCase ), '''token_str''': ANY(lowerCamelCase )}, {'''sequence''': ANY(lowerCamelCase ), '''score''': ANY(lowerCamelCase ), '''token''': ANY(lowerCamelCase ), '''token_str''': ANY(lowerCamelCase )}, {'''sequence''': ANY(lowerCamelCase ), '''score''': ANY(lowerCamelCase ), '''token''': ANY(lowerCamelCase ), '''token_str''': ANY(lowerCamelCase )}, ], ] , ) with self.assertRaises(lowerCamelCase ): fill_masker([None] ) # No mask_token is not supported with self.assertRaises(lowerCamelCase ): fill_masker('''This is''' ) self.run_test_top_k(lowerCamelCase , lowerCamelCase ) self.run_test_targets(lowerCamelCase , lowerCamelCase ) self.run_test_top_k_targets(lowerCamelCase , lowerCamelCase ) self.fill_mask_with_duplicate_targets_and_top_k(lowerCamelCase , lowerCamelCase ) self.fill_mask_with_multiple_masks(lowerCamelCase , lowerCamelCase ) def lowercase ( self , lowerCamelCase , lowerCamelCase ) -> Dict: """simple docstring""" __magic_name__ : Optional[int] = tokenizer.get_vocab() __magic_name__ : List[Any] = sorted(vocab.keys() )[:2] # Pipeline argument __magic_name__ : Optional[Any] = FillMaskPipeline(model=lowerCamelCase , tokenizer=lowerCamelCase , targets=lowerCamelCase ) __magic_name__ : Optional[Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' ) self.assertEqual( lowerCamelCase , [ {'''sequence''': ANY(lowerCamelCase ), '''score''': ANY(lowerCamelCase ), '''token''': ANY(lowerCamelCase ), '''token_str''': ANY(lowerCamelCase )}, {'''sequence''': ANY(lowerCamelCase ), '''score''': ANY(lowerCamelCase ), '''token''': ANY(lowerCamelCase ), '''token_str''': ANY(lowerCamelCase )}, ] , ) __magic_name__ : List[Any] = {vocab[el] for el in targets} self.assertEqual({el['''token'''] for el in outputs} , lowerCamelCase ) __magic_name__ : Optional[Any] = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el['''token_str'''] for el in outputs} , set(lowerCamelCase ) ) # Call argument __magic_name__ : Tuple = FillMaskPipeline(model=lowerCamelCase , tokenizer=lowerCamelCase ) __magic_name__ : Optional[Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=lowerCamelCase ) self.assertEqual( lowerCamelCase , [ {'''sequence''': ANY(lowerCamelCase ), '''score''': ANY(lowerCamelCase ), '''token''': ANY(lowerCamelCase ), '''token_str''': ANY(lowerCamelCase )}, {'''sequence''': ANY(lowerCamelCase ), '''score''': ANY(lowerCamelCase ), '''token''': ANY(lowerCamelCase ), '''token_str''': ANY(lowerCamelCase )}, ] , ) __magic_name__ : str = {vocab[el] for el in targets} self.assertEqual({el['''token'''] for el in outputs} , lowerCamelCase ) __magic_name__ : List[str] = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el['''token_str'''] for el in outputs} , set(lowerCamelCase ) ) # Score equivalence __magic_name__ : int = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=lowerCamelCase ) __magic_name__ : Union[str, Any] = [top_mask['''token_str'''] for top_mask in outputs] 
__magic_name__ : Optional[int] = [top_mask['''score'''] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. if set(lowerCamelCase ) == set(lowerCamelCase ): __magic_name__ : str = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=lowerCamelCase ) __magic_name__ : List[Any] = [top_mask['''score'''] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(lowerCamelCase ) , nested_simplify(lowerCamelCase ) ) # Raises with invalid with self.assertRaises(lowerCamelCase ): __magic_name__ : Dict = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[] ) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised if "" not in tokenizer.get_vocab(): with self.assertRaises(lowerCamelCase ): __magic_name__ : Union[str, Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[''''''] ) with self.assertRaises(lowerCamelCase ): __magic_name__ : int = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets='''''' ) def lowercase ( self , lowerCamelCase , lowerCamelCase ) -> Optional[Any]: """simple docstring""" __magic_name__ : List[str] = FillMaskPipeline(model=lowerCamelCase , tokenizer=lowerCamelCase , top_k=2 ) __magic_name__ : Dict = fill_masker(F'''This is a {tokenizer.mask_token}''' ) self.assertEqual( lowerCamelCase , [ {'''sequence''': ANY(lowerCamelCase ), '''score''': ANY(lowerCamelCase ), '''token''': ANY(lowerCamelCase ), '''token_str''': ANY(lowerCamelCase )}, {'''sequence''': ANY(lowerCamelCase ), '''score''': ANY(lowerCamelCase ), '''token''': ANY(lowerCamelCase ), '''token_str''': ANY(lowerCamelCase )}, ] , ) __magic_name__ : List[str] = FillMaskPipeline(model=lowerCamelCase , tokenizer=lowerCamelCase ) __magic_name__ : Dict = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 ) self.assertEqual( lowerCamelCase , [ {'''sequence''': ANY(lowerCamelCase ), '''score''': ANY(lowerCamelCase ), '''token''': ANY(lowerCamelCase ), '''token_str''': ANY(lowerCamelCase )}, {'''sequence''': ANY(lowerCamelCase ), '''score''': ANY(lowerCamelCase ), '''token''': ANY(lowerCamelCase ), '''token_str''': ANY(lowerCamelCase )}, ] , ) self.assertEqual(nested_simplify(lowerCamelCase ) , nested_simplify(lowerCamelCase ) ) def lowercase ( self , lowerCamelCase , lowerCamelCase ) -> Tuple: """simple docstring""" __magic_name__ : Any = tokenizer.get_vocab() __magic_name__ : List[Any] = FillMaskPipeline(model=lowerCamelCase , tokenizer=lowerCamelCase ) # top_k=2, ntargets=3 __magic_name__ : Tuple = sorted(vocab.keys() )[:3] __magic_name__ : Dict = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=lowerCamelCase ) # If we use the most probably targets, and filter differently, we should still # have the same results __magic_name__ : Any = [el['''token_str'''] for el in sorted(lowerCamelCase , key=lambda lowerCamelCase : x["score"] , reverse=lowerCamelCase )] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(lowerCamelCase ).issubset(lowerCamelCase ): __magic_name__ : List[str] = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=lowerCamelCase ) # They should yield exactly the same result self.assertEqual(nested_simplify(lowerCamelCase ) , nested_simplify(lowerCamelCase ) ) def lowercase ( self , lowerCamelCase , lowerCamelCase ) -> Optional[int]: """simple docstring""" __magic_name__ : str = FillMaskPipeline(model=lowerCamelCase , tokenizer=lowerCamelCase ) __magic_name__ : str = tokenizer.get_vocab() # String duplicates + id duplicates __magic_name__ : Dict = sorted(vocab.keys() )[:3] __magic_name__ : Union[str, Any] = [targets[0], targets[1], targets[0], targets[2], targets[1]] __magic_name__ : Optional[int] = fill_masker(F'''My name is {tokenizer.mask_token}''' , targets=lowerCamelCase , top_k=10 ) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(lowerCamelCase ) , 3 ) def lowercase ( self , lowerCamelCase , lowerCamelCase ) -> Any: """simple docstring""" __magic_name__ : str = FillMaskPipeline(model=lowerCamelCase , tokenizer=lowerCamelCase ) __magic_name__ : Optional[Any] = fill_masker( F'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 ) self.assertEqual( lowerCamelCase , [ [ {'''sequence''': ANY(lowerCamelCase ), '''score''': ANY(lowerCamelCase ), '''token''': ANY(lowerCamelCase ), '''token_str''': ANY(lowerCamelCase )}, {'''sequence''': ANY(lowerCamelCase ), '''score''': ANY(lowerCamelCase ), '''token''': ANY(lowerCamelCase ), '''token_str''': ANY(lowerCamelCase )}, ], [ {'''sequence''': ANY(lowerCamelCase ), '''score''': ANY(lowerCamelCase ), '''token''': ANY(lowerCamelCase ), '''token_str''': ANY(lowerCamelCase )}, {'''sequence''': ANY(lowerCamelCase ), '''score''': ANY(lowerCamelCase ), '''token''': ANY(lowerCamelCase ), '''token_str''': ANY(lowerCamelCase )}, ], [ {'''sequence''': ANY(lowerCamelCase ), '''score''': ANY(lowerCamelCase ), '''token''': ANY(lowerCamelCase ), '''token_str''': ANY(lowerCamelCase )}, {'''sequence''': ANY(lowerCamelCase ), '''score''': ANY(lowerCamelCase ), '''token''': ANY(lowerCamelCase ), '''token_str''': ANY(lowerCamelCase )}, ], ] , )
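# A minimal sketch of the fill-mask pipeline exercised by the tests above; the
# model name is the same checkpoint used in run_large_test, and the printed
# scores are whatever the model returns (illustrative, not asserted here).
from transformers import pipeline

unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2)
for prediction in unmasker("The largest city in France is <mask>"):
    # Each prediction dict carries: sequence, score, token and token_str.
    print(prediction["token_str"], round(prediction["score"], 3))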
336
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion # and https://github.com/hojonathanho/diffusion import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.schedulers.scheduling_utils import SchedulerMixin from diffusers.utils import BaseOutput, deprecate @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM class A__ ( __SCREAMING_SNAKE_CASE ): lowerCamelCase__ : torch.FloatTensor lowerCamelCase__ : Optional[torch.FloatTensor] =None def lowerCAmelCase ( UpperCAmelCase, UpperCAmelCase=0.9_99, UpperCAmelCase="cosine", ) ->Optional[Any]: """simple docstring""" if alpha_transform_type == "cosine": def alpha_bar_fn(UpperCAmelCase ): return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(UpperCAmelCase ): return math.exp(t * -12.0 ) else: raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) __magic_name__ : List[Any] = [] for i in range(UpperCAmelCase ): __magic_name__ : Tuple = i / num_diffusion_timesteps __magic_name__ : Optional[Any] = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(UpperCAmelCase ) / alpha_bar_fn(UpperCAmelCase ), UpperCAmelCase ) ) return torch.tensor(UpperCAmelCase, dtype=torch.floataa ) class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): lowerCamelCase__ : Union[str, Any] =1 @register_to_config def __init__( self , lowerCamelCase = 1000 , lowerCamelCase = 0.0_0_0_1 , lowerCamelCase = 0.0_2 , lowerCamelCase = "linear" , lowerCamelCase = None , lowerCamelCase = True , lowerCamelCase = True , lowerCamelCase = 0 , lowerCamelCase = "epsilon" , lowerCamelCase = 1.0 , **lowerCamelCase , ) -> Optional[Any]: """simple docstring""" if kwargs.get('''set_alpha_to_one''' , lowerCamelCase ) is not None: __magic_name__ : Any = ( '''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.''' ) deprecate('''set_alpha_to_one''' , '''1.0.0''' , lowerCamelCase , standard_warn=lowerCamelCase ) __magic_name__ : Tuple = kwargs['''set_alpha_to_one'''] if trained_betas is not None: __magic_name__ : Any = torch.tensor(lowerCamelCase , dtype=torch.floataa ) elif beta_schedule == "linear": __magic_name__ : Union[str, Any] = torch.linspace(lowerCamelCase , lowerCamelCase , lowerCamelCase , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. __magic_name__ : str = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCamelCase , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule __magic_name__ : List[str] = betas_for_alpha_bar(lowerCamelCase ) else: raise NotImplementedError(F'''{beta_schedule} does is not implemented for {self.__class__}''' ) __magic_name__ : Dict = 1.0 - self.betas __magic_name__ : List[str] = torch.cumprod(self.alphas , dim=0 ) # At every step in inverted ddim, we are looking into the next alphas_cumprod # For the final step, there is no next alphas_cumprod, and the index is out of bounds # `set_alpha_to_zero` decides whether we set this parameter simply to zero # in this case, self.step() just output the predicted noise # or whether we use the final alpha of the "non-previous" one. 
__magic_name__ : str = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1] # standard deviation of the initial noise distribution __magic_name__ : int = 1.0 # setable values __magic_name__ : List[str] = None __magic_name__ : Dict = torch.from_numpy(np.arange(0 , lowerCamelCase ).copy().astype(np.intaa ) ) def lowercase ( self , lowerCamelCase , lowerCamelCase = None ) -> torch.FloatTensor: """simple docstring""" return sample def lowercase ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple: """simple docstring""" if num_inference_steps > self.config.num_train_timesteps: raise ValueError( F'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:''' F''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle''' F''' maximal {self.config.num_train_timesteps} timesteps.''' ) __magic_name__ : int = num_inference_steps __magic_name__ : Tuple = self.config.num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __magic_name__ : Any = (np.arange(0 , lowerCamelCase ) * step_ratio).round().copy().astype(np.intaa ) __magic_name__ : Optional[int] = torch.from_numpy(lowerCamelCase ).to(lowerCamelCase ) self.timesteps += self.config.steps_offset def lowercase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = 0.0 , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = True , ) -> Union[DDIMSchedulerOutput, Tuple]: """simple docstring""" __magic_name__ : Dict = timestep + self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas # change original implementation to exactly match noise levels for analogous forward process __magic_name__ : List[Any] = self.alphas_cumprod[timestep] __magic_name__ : str = ( self.alphas_cumprod[prev_timestep] if prev_timestep < self.config.num_train_timesteps else self.final_alpha_cumprod ) __magic_name__ : Any = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf if self.config.prediction_type == "epsilon": __magic_name__ : Tuple = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 __magic_name__ : Union[str, Any] = model_output elif self.config.prediction_type == "sample": __magic_name__ : Dict = model_output __magic_name__ : Optional[int] = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 elif self.config.prediction_type == "v_prediction": __magic_name__ : Optional[int] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output __magic_name__ : Optional[int] = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample else: raise ValueError( F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or''' ''' `v_prediction`''' ) # 4. Clip or threshold "predicted x_0" if self.config.clip_sample: __magic_name__ : Any = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf __magic_name__ : Union[str, Any] = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon # 6. 
compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf __magic_name__ : Optional[Any] = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if not return_dict: return (prev_sample, pred_original_sample) return DDIMSchedulerOutput(prev_sample=lowerCamelCase , pred_original_sample=lowerCamelCase ) def __len__( self ) -> int: """simple docstring""" return self.config.num_train_timesteps
336
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json', 'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json', 'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json', 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json', 'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json', 'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json', 'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json', 'cl-tohoku/bert-base-japanese-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json' ), 'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json', # See all BERT models at https://huggingface.co/models?filter=bert } class _a ( UpperCamelCase__ ): _lowercase : Any = '''bert''' def __init__( self: int , UpperCamelCase_: str=30_522 , UpperCamelCase_: str=768 , UpperCamelCase_: List[str]=12 , UpperCamelCase_: Tuple=12 , UpperCamelCase_: str=3_072 , UpperCamelCase_: int="gelu" , UpperCamelCase_: Union[str, Any]=0.1 , UpperCamelCase_: List[str]=0.1 , UpperCamelCase_: List[Any]=512 , UpperCamelCase_: Optional[Any]=2 , UpperCamelCase_: Any=0.02 , UpperCamelCase_: Union[str, Any]=1E-1_2 , UpperCamelCase_: Optional[Any]=0 , UpperCamelCase_: int="absolute" , UpperCamelCase_: Any=True , UpperCamelCase_: 
Optional[int]=None , **UpperCamelCase_: List[Any] , ) -> Dict: """simple docstring""" super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase ) lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = hidden_act lowercase__ = intermediate_size lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = type_vocab_size lowercase__ = initializer_range lowercase__ = layer_norm_eps lowercase__ = position_embedding_type lowercase__ = use_cache lowercase__ = classifier_dropout class _a ( UpperCamelCase__ ): @property def lowerCamelCase_ ( self: int ) -> str: """simple docstring""" if self.task == "multiple-choice": lowercase__ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: lowercase__ = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
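# The two classes above are obfuscated copies of the upstream BertConfig and its
# ONNX export config; a minimal sketch using the real library equivalent
# (the printed values are the documented defaults):
from transformers import BertConfig

config = BertConfig(vocab_size=30_522, hidden_size=768, num_hidden_layers=12)
print(config.hidden_size, config.position_embedding_type)  # 768 absolute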
43
import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset _a = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class __A ( nn.Module ): '''simple docstring''' def __init__( self , __lowerCAmelCase ): '''simple docstring''' super().__init__() lowerCamelCase__ = torchvision.models.resnetaaa(pretrained=__lowerCAmelCase ) lowerCamelCase__ = list(model.children() )[:-2] lowerCamelCase__ = nn.Sequential(*__lowerCAmelCase ) lowerCamelCase__ = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def __lowerCamelCase ( self , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = self.pool(self.model(__lowerCAmelCase ) ) lowerCamelCase__ = torch.flatten(__lowerCAmelCase , start_dim=2 ) lowerCamelCase__ = out.transpose(1 , 2 ).contiguous() return out # BxNx2048 class __A ( lowerCAmelCase ): '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = [json.loads(__lowerCAmelCase ) for l in open(__lowerCAmelCase )] lowerCamelCase__ = os.path.dirname(__lowerCAmelCase ) lowerCamelCase__ = tokenizer lowerCamelCase__ = labels lowerCamelCase__ = len(__lowerCAmelCase ) lowerCamelCase__ = max_seq_length lowerCamelCase__ = transforms def __len__( self ): '''simple docstring''' return len(self.data ) def __getitem__( self , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = torch.LongTensor(self.tokenizer.encode(self.data[index]['''text'''] , add_special_tokens=__lowerCAmelCase ) ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = sentence[0], sentence[1:-1], sentence[-1] lowerCamelCase__ = sentence[: self.max_seq_length] lowerCamelCase__ = torch.zeros(self.n_classes ) lowerCamelCase__ = 1 lowerCamelCase__ = Image.open(os.path.join(self.data_dir , self.data[index]['''img'''] ) ).convert('''RGB''' ) lowerCamelCase__ = self.transforms(__lowerCAmelCase ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = Counter() for row in self.data: label_freqs.update(row['''label'''] ) return label_freqs def lowerCAmelCase__(__snake_case ) -> Union[str, Any]: '''simple docstring''' lowerCamelCase__ = [len(row['''sentence'''] ) for row in batch] lowerCamelCase__ , lowerCamelCase__ = len(__snake_case ), max(__snake_case ) lowerCamelCase__ = torch.zeros(__snake_case ,__snake_case ,dtype=torch.long ) lowerCamelCase__ = torch.zeros(__snake_case ,__snake_case ,dtype=torch.long ) for i_batch, (input_row, length) in enumerate(zip(__snake_case ,__snake_case ) ): lowerCamelCase__ = input_row['''sentence'''] lowerCamelCase__ = 1 lowerCamelCase__ = torch.stack([row['''image'''] for row in batch] ) lowerCamelCase__ = torch.stack([row['''label'''] for row in batch] ) lowerCamelCase__ = torch.stack([row['''image_start_token'''] for row in batch] ) lowerCamelCase__ = torch.stack([row['''image_end_token'''] for row in batch] ) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def lowerCAmelCase__() -> Optional[int]: '''simple docstring''' return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", 
"Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir", ] def lowerCAmelCase__() -> Any: '''simple docstring''' return transforms.Compose( [ transforms.Resize(256 ), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize( mean=[0.4_6_7_7_7_0_4_4, 0.4_4_5_3_1_4_2_9, 0.4_0_6_6_1_0_1_7] ,std=[0.1_2_2_2_1_9_9_4, 0.1_2_1_4_5_8_3_5, 0.1_4_3_8_0_4_6_9] ,), ] )
481
0
"""simple docstring""" import logging import os from typing import List, Tuple import numpy as np import psutil import torch import torch.distributed as dist from transformers import RagRetriever lowerCAmelCase__ = logging.getLogger(__name__) class snake_case ( __lowercase ): def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ): """simple docstring""" super().__init__( SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=SCREAMING_SNAKE_CASE_ , generator_tokenizer=SCREAMING_SNAKE_CASE_ , index=SCREAMING_SNAKE_CASE_ , init_retrieval=SCREAMING_SNAKE_CASE_ , ) SCREAMING_SNAKE_CASE_ = None def _lowercase (self , SCREAMING_SNAKE_CASE_ ): """simple docstring""" logger.info('''initializing retrieval''' ) # initializing a separate process group for retrieval as the default # nccl backend doesn't support gather/scatter operations while gloo # is too slow to replace nccl for the core gpu communication if dist.is_initialized(): logger.info('''dist initialized''' ) # needs to be set manually SCREAMING_SNAKE_CASE_ = self._infer_socket_ifname() # avoid clash with the NCCL port SCREAMING_SNAKE_CASE_ = str(distributed_port + 1 ) SCREAMING_SNAKE_CASE_ = dist.new_group(ranks=SCREAMING_SNAKE_CASE_ , backend='''gloo''' ) # initialize retriever only on the main worker if not dist.is_initialized() or self._is_main(): logger.info('''dist not initialized / main''' ) self.index.init_index() # all processes wait untill the retriever is initialized by the main process if dist.is_initialized(): torch.distributed.barrier(group=self.process_group ) def _lowercase (self ): """simple docstring""" return dist.get_rank(group=self.process_group ) == 0 def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=torch.floataa ): """simple docstring""" SCREAMING_SNAKE_CASE_ = torch.empty(SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ ) dist.scatter(SCREAMING_SNAKE_CASE_ , src=0 , scatter_list=SCREAMING_SNAKE_CASE_ , group=self.process_group ) return target_tensor def _lowercase (self ): """simple docstring""" SCREAMING_SNAKE_CASE_ = psutil.net_if_addrs() # a hacky way to deal with varying network interface names SCREAMING_SNAKE_CASE_ = next((addr for addr in addrs if addr.startswith('''e''' )) , SCREAMING_SNAKE_CASE_ ) return ifname def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): """simple docstring""" if not dist.is_initialized(): SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self._main_retrieve(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(SCREAMING_SNAKE_CASE_ ) # distributed training SCREAMING_SNAKE_CASE_ = dist.get_world_size(group=self.process_group ) # gather logic SCREAMING_SNAKE_CASE_ = None if self._is_main(): SCREAMING_SNAKE_CASE_ = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(SCREAMING_SNAKE_CASE_ )] dist.gather(torch.tensor(SCREAMING_SNAKE_CASE_ ) , dst=0 , gather_list=SCREAMING_SNAKE_CASE_ , group=self.process_group ) # scatter logic SCREAMING_SNAKE_CASE_ = question_hidden_states.shape[0] SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = [] if self._is_main(): assert len(SCREAMING_SNAKE_CASE_ ) == world_size SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self._main_retrieve(torch.cat(SCREAMING_SNAKE_CASE_ ).numpy() , SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = torch.tensor(SCREAMING_SNAKE_CASE_ ), torch.tensor(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = 
self._chunk_tensor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = self._chunk_tensor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = self._scattered(SCREAMING_SNAKE_CASE_ , [n_queries, n_docs] , target_type=torch.intaa ) SCREAMING_SNAKE_CASE_ = self._scattered(SCREAMING_SNAKE_CASE_ , [n_queries, n_docs, question_hidden_states.shape[1]] ) return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(SCREAMING_SNAKE_CASE_ )
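# A stripped-down sketch of the gather -> retrieve-on-rank-0 -> scatter pattern
# implemented above, using torch.distributed primitives directly. It assumes
# init_process_group has already been called with a backend that supports
# gather/scatter (e.g. gloo); the "retrieval" on rank 0 is a placeholder.
import torch
import torch.distributed as dist


def distributed_retrieve(question_hidden_states: torch.Tensor, group) -> torch.Tensor:
    world_size = dist.get_world_size(group=group)
    is_main = dist.get_rank(group=group) == 0

    # 1) every worker sends its queries to rank 0
    gather_list = [torch.empty_like(question_hidden_states) for _ in range(world_size)] if is_main else None
    dist.gather(question_hidden_states, gather_list=gather_list, dst=0, group=group)

    # 2) rank 0 runs the (expensive) index lookup on the full batch ...
    scatter_list = [q * 2 for q in gather_list] if is_main else None  # placeholder retrieval

    # 3) ... and scatters each worker's slice of the results back
    out = torch.empty_like(question_hidden_states)
    dist.scatter(out, scatter_list=scatter_list, src=0, group=group)
    return out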
709
"""simple docstring""" import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class snake_case ( __lowercase ): def _lowercase (self ): """simple docstring""" SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp() SCREAMING_SNAKE_CASE_ = 8 # DPR tok SCREAMING_SNAKE_CASE_ = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , DPR_VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) # BART tok SCREAMING_SNAKE_CASE_ = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] SCREAMING_SNAKE_CASE_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) SCREAMING_SNAKE_CASE_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] SCREAMING_SNAKE_CASE_ = {'''unk_token''': '''<unk>'''} SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''bart_tokenizer''' ) os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , BART_VOCAB_FILES_NAMES['''vocab_file'''] ) SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , BART_VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) ) def _lowercase (self ): """simple docstring""" return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def _lowercase (self ): """simple docstring""" return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def _lowercase (self ): """simple docstring""" return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) ) def _lowercase (self ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def _lowercase (self ): """simple 
docstring""" SCREAMING_SNAKE_CASE_ = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def _lowercase (self ): """simple docstring""" SCREAMING_SNAKE_CASE_ = self.get_dummy_dataset() SCREAMING_SNAKE_CASE_ = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset: SCREAMING_SNAKE_CASE_ = dataset SCREAMING_SNAKE_CASE_ = RagRetriever( SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def _lowercase (self , SCREAMING_SNAKE_CASE_ ): """simple docstring""" SCREAMING_SNAKE_CASE_ = self.get_dummy_dataset() SCREAMING_SNAKE_CASE_ = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , ) if from_disk: SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''dataset''' ) SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''index.faiss''' ) dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) ) dataset.drop_index('''embeddings''' ) dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) ) del dataset SCREAMING_SNAKE_CASE_ = RagRetriever( SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: SCREAMING_SNAKE_CASE_ = RagRetriever( SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , SCREAMING_SNAKE_CASE_ ) , ) return retriever def _lowercase (self ): """simple docstring""" SCREAMING_SNAKE_CASE_ = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT ) SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' ) dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' ) pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) ) SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' ) SCREAMING_SNAKE_CASE_ = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset} pickle.dump(SCREAMING_SNAKE_CASE_ , open(SCREAMING_SNAKE_CASE_ , '''wb''' ) ) SCREAMING_SNAKE_CASE_ = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , ) SCREAMING_SNAKE_CASE_ = RagRetriever( SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def _lowercase (self ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = 
self.get_dummy_canonical_hf_index_retriever() SCREAMING_SNAKE_CASE_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _lowercase (self ): """simple docstring""" SCREAMING_SNAKE_CASE_ = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset: SCREAMING_SNAKE_CASE_ = self.get_dummy_dataset() retriever.save_pretrained(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 ) self.assertTrue(out is not None ) def _lowercase (self ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _lowercase (self ): """simple docstring""" SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 ) self.assertTrue(out is not None ) def _lowercase (self ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = np.array( [np.ones(self.retrieval_vector_size ), 
-np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _lowercase (self ): """simple docstring""" SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 ) self.assertTrue(out is not None ) def _lowercase (self ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = self.get_dummy_legacy_index_retriever() SCREAMING_SNAKE_CASE_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''text'''] ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _lowercase (self ): """simple docstring""" SCREAMING_SNAKE_CASE_ = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def _lowercase (self ): """simple docstring""" import torch SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = self.get_dummy_canonical_hf_index_retriever() SCREAMING_SNAKE_CASE_ = [[5, 7], [10, 11]] SCREAMING_SNAKE_CASE_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) SCREAMING_SNAKE_CASE_ = retriever(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ 
,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = ( out['''context_input_ids'''], out['''context_attention_mask'''], out['''retrieved_doc_embeds'''], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) SCREAMING_SNAKE_CASE_ = retriever( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , ) SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = ( # noqa: F841 out['''context_input_ids'''], out['''context_attention_mask'''], out['''retrieved_doc_embeds'''], out['''doc_ids'''], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def _lowercase (self ): """simple docstring""" SCREAMING_SNAKE_CASE_ = self.get_dpr_ctx_encoder_tokenizer() SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ ) retriever.set_ctx_encoder_tokenizer(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = [[5, 7], [10, 11]] SCREAMING_SNAKE_CASE_ = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) SCREAMING_SNAKE_CASE_ = retriever(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE_ ) self.assertEqual( len(SCREAMING_SNAKE_CASE_ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , SCREAMING_SNAKE_CASE_ ) # check for doc token related keys in dictionary.
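For orientation, every test above drives the same retrieval entry point; below is a minimal sketch of that call, assuming retriever is one of the dummy retrievers built by the helpers and the retrieval vector size is 8 as configured in setUp (the helper names themselves are masked in this dump):

import numpy as np

# One all-ones query and one negated query: under inner-product search, doc "1"
# (embedding 2 * ones) scores highest for the first query and doc "0" (ones) for
# the second, which is exactly the [[1], [0]] doc-id pattern asserted above.
hidden_states = np.array([np.ones(8), -np.ones(8)], dtype=np.float32)
retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=1)
assert retrieved_doc_embeds.shape == (2, 1, 8)
assert doc_ids.tolist() == [[1], [0]]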
628
0
import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml lowerCamelCase__ : Any = NewType('DataClass', Any) lowerCamelCase__ : Tuple = NewType('DataClassType', Any) def UpperCAmelCase_ ( __UpperCAmelCase : Tuple ) -> Tuple: if isinstance(__UpperCAmelCase , __UpperCAmelCase ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)." ) def UpperCAmelCase_ ( __UpperCAmelCase : list ) -> Callable[[str], Any]: SCREAMING_SNAKE_CASE_ = {str(__UpperCAmelCase ): choice for choice in choices} return lambda __UpperCAmelCase : str_to_choice.get(__UpperCAmelCase , __UpperCAmelCase ) def UpperCAmelCase_ ( *, __UpperCAmelCase : Union[str, List[str]] = None , __UpperCAmelCase : str = None , __UpperCAmelCase : Any = dataclasses.MISSING , __UpperCAmelCase : Callable[[], Any] = dataclasses.MISSING , __UpperCAmelCase : dict = None , **__UpperCAmelCase : Dict , ) -> dataclasses.Field: if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls SCREAMING_SNAKE_CASE_ = {} if aliases is not None: SCREAMING_SNAKE_CASE_ = aliases if help is not None: SCREAMING_SNAKE_CASE_ = help return dataclasses.field(metadata=__UpperCAmelCase , default=__UpperCAmelCase , default_factory=__UpperCAmelCase , **__UpperCAmelCase ) class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = 42 def __init__( self : Any , _lowerCAmelCase : Union[DataClassType, Iterable[DataClassType]] , **_lowerCAmelCase : Dict ): # To make the default appear when using --help if "formatter_class" not in kwargs: SCREAMING_SNAKE_CASE_ = ArgumentDefaultsHelpFormatter super().__init__(**_lowerCAmelCase ) if dataclasses.is_dataclass(_lowerCAmelCase ): SCREAMING_SNAKE_CASE_ = [dataclass_types] SCREAMING_SNAKE_CASE_ = list(_lowerCAmelCase ) for dtype in self.dataclass_types: self._add_dataclass_arguments(_lowerCAmelCase ) @staticmethod def lowerCAmelCase_ ( _lowerCAmelCase : ArgumentParser , _lowerCAmelCase : dataclasses.Field ): SCREAMING_SNAKE_CASE_ = F"--{field.name}" SCREAMING_SNAKE_CASE_ = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. if isinstance(field.type , _lowerCAmelCase ): raise RuntimeError( 'Unresolved type detected, which should have been done with the help of ' '`typing.get_type_hints` method by default' ) SCREAMING_SNAKE_CASE_ = kwargs.pop('aliases' , [] ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ): SCREAMING_SNAKE_CASE_ = [aliases] SCREAMING_SNAKE_CASE_ = getattr(field.type , '__origin__' , field.type ) if origin_type is Union or (hasattr(_lowerCAmelCase , 'UnionType' ) and isinstance(_lowerCAmelCase , types.UnionType )): if str not in field.type.__args__ and ( len(field.type.__args__ ) != 2 or type(_lowerCAmelCase ) not in field.type.__args__ ): raise ValueError( 'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because' ' the argument parser only supports one type per argument.' 
F" Problem encountered in field '{field.name}'." ) if type(_lowerCAmelCase ) not in field.type.__args__: # filter `str` in Union SCREAMING_SNAKE_CASE_ = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] SCREAMING_SNAKE_CASE_ = getattr(field.type , '__origin__' , field.type ) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) SCREAMING_SNAKE_CASE_ = ( field.type.__args__[0] if isinstance(_lowerCAmelCase , field.type.__args__[1] ) else field.type.__args__[1] ) SCREAMING_SNAKE_CASE_ = getattr(field.type , '__origin__' , field.type ) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) SCREAMING_SNAKE_CASE_ = {} if origin_type is Literal or (isinstance(field.type , _lowerCAmelCase ) and issubclass(field.type , _lowerCAmelCase )): if origin_type is Literal: SCREAMING_SNAKE_CASE_ = field.type.__args__ else: SCREAMING_SNAKE_CASE_ = [x.value for x in field.type] SCREAMING_SNAKE_CASE_ = make_choice_type_function(kwargs['choices'] ) if field.default is not dataclasses.MISSING: SCREAMING_SNAKE_CASE_ = field.default else: SCREAMING_SNAKE_CASE_ = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument SCREAMING_SNAKE_CASE_ = copy(_lowerCAmelCase ) # Hack because type=bool in argparse does not behave as we want. SCREAMING_SNAKE_CASE_ = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. SCREAMING_SNAKE_CASE_ = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way SCREAMING_SNAKE_CASE_ = default # This tells argparse we accept 0 or 1 value after --field_name SCREAMING_SNAKE_CASE_ = '?' # This is the value that will get picked if we do --field_name (without value) SCREAMING_SNAKE_CASE_ = True elif isclass(_lowerCAmelCase ) and issubclass(_lowerCAmelCase , _lowerCAmelCase ): SCREAMING_SNAKE_CASE_ = field.type.__args__[0] SCREAMING_SNAKE_CASE_ = '+' if field.default_factory is not dataclasses.MISSING: SCREAMING_SNAKE_CASE_ = field.default_factory() elif field.default is dataclasses.MISSING: SCREAMING_SNAKE_CASE_ = True else: SCREAMING_SNAKE_CASE_ = field.type if field.default is not dataclasses.MISSING: SCREAMING_SNAKE_CASE_ = field.default elif field.default_factory is not dataclasses.MISSING: SCREAMING_SNAKE_CASE_ = field.default_factory() else: SCREAMING_SNAKE_CASE_ = True parser.add_argument(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase ) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. 
if field.default is True and (field.type is bool or field.type == Optional[bool]): SCREAMING_SNAKE_CASE_ = False parser.add_argument(F"--no_{field.name}" , action='store_false' , dest=field.name , **_lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : DataClassType ): if hasattr(_lowerCAmelCase , '_argument_group_name' ): SCREAMING_SNAKE_CASE_ = self.add_argument_group(dtype._argument_group_name ) else: SCREAMING_SNAKE_CASE_ = self try: SCREAMING_SNAKE_CASE_ = get_type_hints(_lowerCAmelCase ) except NameError: raise RuntimeError( F"Type resolution failed for {dtype}. Try declaring the class in global scope or " 'removing line of `from __future__ import annotations` which opts in Postponed ' 'Evaluation of Annotations (PEP 563)' ) except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(_lowerCAmelCase ): SCREAMING_SNAKE_CASE_ = '.'.join(map(_lowerCAmelCase , sys.version_info[:3] ) ) raise RuntimeError( F"Type resolution failed for {dtype} on Python {python_version}. Try removing " 'line of `from __future__ import annotations` which opts in union types as ' '`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To ' 'support Python versions that lower than 3.10, you need to use ' '`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of ' '`X | None`.' ) from ex raise for field in dataclasses.fields(_lowerCAmelCase ): if not field.init: continue SCREAMING_SNAKE_CASE_ = type_hints[field.name] self._parse_dataclass_field(_lowerCAmelCase , _lowerCAmelCase ) def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : int=None , _lowerCAmelCase : str=False , _lowerCAmelCase : Any=True , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Any=None , ): if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )): SCREAMING_SNAKE_CASE_ = [] if args_filename: args_files.append(Path(_lowerCAmelCase ) ) elif look_for_args_file and len(sys.argv ): args_files.append(Path(sys.argv[0] ).with_suffix('.args' ) ) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values SCREAMING_SNAKE_CASE_ = ArgumentParser() args_file_parser.add_argument(_lowerCAmelCase , type=_lowerCAmelCase , action='append' ) # Use only remaining args for further parsing (remove the args_file_flag) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = args_file_parser.parse_known_args(args=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = vars(_lowerCAmelCase ).get(args_file_flag.lstrip('-' ) , _lowerCAmelCase ) if cmd_args_file_paths: args_files.extend([Path(_lowerCAmelCase ) for p in cmd_args_file_paths] ) SCREAMING_SNAKE_CASE_ = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last SCREAMING_SNAKE_CASE_ = file_args + args if args is not None else file_args + sys.argv[1:] SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.parse_known_args(args=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = [] for dtype in self.dataclass_types: SCREAMING_SNAKE_CASE_ = {f.name for f in dataclasses.fields(_lowerCAmelCase ) if f.init} SCREAMING_SNAKE_CASE_ = {k: v for k, v in vars(_lowerCAmelCase ).items() if k in keys} for k in keys: delattr(_lowerCAmelCase , _lowerCAmelCase ) 
SCREAMING_SNAKE_CASE_ = dtype(**_lowerCAmelCase ) outputs.append(_lowerCAmelCase ) if len(namespace.__dict__ ) > 0: # additional namespace. outputs.append(_lowerCAmelCase ) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(F"Some specified arguments are not used by the HfArgumentParser: {remaining_args}" ) return (*outputs,) def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : Dict[str, Any] , _lowerCAmelCase : bool = False ): SCREAMING_SNAKE_CASE_ = set(args.keys() ) SCREAMING_SNAKE_CASE_ = [] for dtype in self.dataclass_types: SCREAMING_SNAKE_CASE_ = {f.name for f in dataclasses.fields(_lowerCAmelCase ) if f.init} SCREAMING_SNAKE_CASE_ = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys() ) SCREAMING_SNAKE_CASE_ = dtype(**_lowerCAmelCase ) outputs.append(_lowerCAmelCase ) if not allow_extra_keys and unused_keys: raise ValueError(F"Some keys are not used by the HfArgumentParser: {sorted(_lowerCAmelCase )}" ) return tuple(_lowerCAmelCase ) def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : bool = False ): with open(Path(_lowerCAmelCase ) , encoding='utf-8' ) as open_json_file: SCREAMING_SNAKE_CASE_ = json.loads(open_json_file.read() ) SCREAMING_SNAKE_CASE_ = self.parse_dict(_lowerCAmelCase , allow_extra_keys=_lowerCAmelCase ) return tuple(_lowerCAmelCase ) def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : str , _lowerCAmelCase : bool = False ): SCREAMING_SNAKE_CASE_ = self.parse_dict(yaml.safe_load(Path(_lowerCAmelCase ).read_text() ) , allow_extra_keys=_lowerCAmelCase ) return tuple(_lowerCAmelCase )
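The masked method names above correspond, in the upstream transformers API, to parse_args_into_dataclasses, parse_dict, parse_json_file and parse_yaml_file; a minimal usage sketch under that assumption:

from dataclasses import dataclass, field

@dataclass
class TrainingArgs:
    learning_rate: float = field(default=1e-3, metadata={"help": "Optimizer learning rate."})
    do_eval: bool = field(default=False, metadata={"help": "Whether to run evaluation."})

# HfArgumentParser builds one argparse argument per dataclass field;
# bool fields accept a bare flag thanks to the nargs="?" / const=True handling above.
parser = HfArgumentParser(TrainingArgs)
(training_args,) = parser.parse_args_into_dataclasses(["--learning_rate", "0.01", "--do_eval"])
assert training_args.learning_rate == 0.01 and training_args.do_eval is True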
31
def sylvester(number: int) -> int:
    """Return the n-th term of Sylvester's sequence: a(1) = 2, a(n) = (a(n-1) - 1) * a(n-1) + 1."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"
    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
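A quick sanity check of the recurrence a(n) = (a(n-1) - 1) * a(n-1) + 1 implemented above, whose first terms are 2, 3, 7, 43, 1807:

assert [sylvester(n) for n in range(1, 6)] == [2, 3, 7, 43, 1807]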
31
1
'''simple docstring''' import argparse import requests import torch from PIL import Image from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = SwinConfig(image_size=1_92 ) if "base" in model_name: SCREAMING_SNAKE_CASE = 6 SCREAMING_SNAKE_CASE = 1_28 SCREAMING_SNAKE_CASE = (2, 2, 18, 2) SCREAMING_SNAKE_CASE = (4, 8, 16, 32) elif "large" in model_name: SCREAMING_SNAKE_CASE = 12 SCREAMING_SNAKE_CASE = 1_92 SCREAMING_SNAKE_CASE = (2, 2, 18, 2) SCREAMING_SNAKE_CASE = (6, 12, 24, 48) else: raise ValueError("""Model not supported, only supports base and large variants""" ) SCREAMING_SNAKE_CASE = window_size SCREAMING_SNAKE_CASE = embed_dim SCREAMING_SNAKE_CASE = depths SCREAMING_SNAKE_CASE = num_heads return config def __lowercase ( _SCREAMING_SNAKE_CASE ) -> List[str]: '''simple docstring''' if "encoder.mask_token" in name: SCREAMING_SNAKE_CASE = name.replace("""encoder.mask_token""" , """embeddings.mask_token""" ) if "encoder.patch_embed.proj" in name: SCREAMING_SNAKE_CASE = name.replace("""encoder.patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "encoder.patch_embed.norm" in name: SCREAMING_SNAKE_CASE = name.replace("""encoder.patch_embed.norm""" , """embeddings.norm""" ) if "attn.proj" in name: SCREAMING_SNAKE_CASE = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: SCREAMING_SNAKE_CASE = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: SCREAMING_SNAKE_CASE = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: SCREAMING_SNAKE_CASE = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: SCREAMING_SNAKE_CASE = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: SCREAMING_SNAKE_CASE = name.replace("""mlp.fc2""" , """output.dense""" ) if name == "encoder.norm.weight": SCREAMING_SNAKE_CASE = """layernorm.weight""" if name == "encoder.norm.bias": SCREAMING_SNAKE_CASE = """layernorm.bias""" if "decoder" in name: pass else: SCREAMING_SNAKE_CASE = """swin.""" + name return name def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: '''simple docstring''' for key in orig_state_dict.copy().keys(): SCREAMING_SNAKE_CASE = orig_state_dict.pop(_SCREAMING_SNAKE_CASE ) if "attn_mask" in key: pass elif "qkv" in key: SCREAMING_SNAKE_CASE = key.split(""".""" ) SCREAMING_SNAKE_CASE = int(key_split[2] ) SCREAMING_SNAKE_CASE = int(key_split[4] ) SCREAMING_SNAKE_CASE = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: SCREAMING_SNAKE_CASE = val[:dim, :] SCREAMING_SNAKE_CASE = val[ dim : dim * 2, : ] SCREAMING_SNAKE_CASE = val[-dim:, :] else: SCREAMING_SNAKE_CASE = val[ :dim ] SCREAMING_SNAKE_CASE = val[ dim : dim * 2 ] SCREAMING_SNAKE_CASE = val[ -dim: ] else: SCREAMING_SNAKE_CASE = val return orig_state_dict def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = torch.load(_SCREAMING_SNAKE_CASE , map_location="""cpu""" )["""model"""] SCREAMING_SNAKE_CASE = get_swin_config(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = SwinForMaskedImageModeling(_SCREAMING_SNAKE_CASE ) model.eval() SCREAMING_SNAKE_CASE = convert_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) model.load_state_dict(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = 
"""http://images.cocodataset.org/val2017/000000039769.jpg""" SCREAMING_SNAKE_CASE = ViTImageProcessor(size={"""height""": 1_92, """width""": 1_92} ) SCREAMING_SNAKE_CASE = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) SCREAMING_SNAKE_CASE = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ) with torch.no_grad(): SCREAMING_SNAKE_CASE = model(**_SCREAMING_SNAKE_CASE ).logits print(outputs.keys() ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: print(F"""Pushing model and image processor for {model_name} to hub""" ) model.push_to_hub(F"""microsoft/{model_name}""" ) image_processor.push_to_hub(F"""microsoft/{model_name}""" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""swin-base-simmim-window6-192""", type=str, choices=["""swin-base-simmim-window6-192""", """swin-large-simmim-window12-192"""], help="""Name of the Swin SimMIM model you'd like to convert.""", ) parser.add_argument( """--checkpoint_path""", default="""/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth""", type=str, help="""Path to the original PyTorch checkpoint (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) SCREAMING_SNAKE_CASE_ = parser.parse_args() convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
708
import inspect import unittest from transformers import DecisionTransformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DecisionTransformerModel from transformers.models.decision_transformer.modeling_decision_transformer import ( DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) class UpperCamelCase__ : '''simple docstring''' def __init__( self : Optional[Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : Dict=13 ,lowerCamelCase__ : Tuple=7 ,lowerCamelCase__ : Optional[Any]=6 ,lowerCamelCase__ : List[str]=17 ,lowerCamelCase__ : Union[str, Any]=23 ,lowerCamelCase__ : Tuple=11 ,lowerCamelCase__ : Any=True ,) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = seq_length SCREAMING_SNAKE_CASE = act_dim SCREAMING_SNAKE_CASE = state_dim SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = max_length SCREAMING_SNAKE_CASE = is_training def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = floats_tensor((self.batch_size, self.seq_length, self.state_dim) ) SCREAMING_SNAKE_CASE = floats_tensor((self.batch_size, self.seq_length, self.act_dim) ) SCREAMING_SNAKE_CASE = floats_tensor((self.batch_size, self.seq_length, 1) ) SCREAMING_SNAKE_CASE = floats_tensor((self.batch_size, self.seq_length, 1) ) SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, self.seq_length) ,vocab_size=1000 ) SCREAMING_SNAKE_CASE = random_attention_mask((self.batch_size, self.seq_length) ) SCREAMING_SNAKE_CASE = self.get_config() return ( config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]: '''simple docstring''' return DecisionTransformerConfig( batch_size=self.batch_size ,seq_length=self.seq_length ,act_dim=self.act_dim ,state_dim=self.state_dim ,hidden_size=self.hidden_size ,max_length=self.max_length ,) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Any ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Any ,lowerCamelCase__ : Union[str, Any] ,) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = DecisionTransformerModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ) self.parent.assertEqual(result.state_preds.shape ,states.shape ) self.parent.assertEqual(result.action_preds.shape ,actions.shape ) self.parent.assertEqual(result.return_preds.shape ,returns_to_go.shape ) self.parent.assertEqual( result.last_hidden_state.shape ,(self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE ), ( SCREAMING_SNAKE_CASE ), ( SCREAMING_SNAKE_CASE ), ( SCREAMING_SNAKE_CASE ), ( SCREAMING_SNAKE_CASE ), ( SCREAMING_SNAKE_CASE ), ( 
SCREAMING_SNAKE_CASE ), ) = config_and_inputs SCREAMING_SNAKE_CASE = { """states""": states, """actions""": actions, """rewards""": rewards, """returns_to_go""": returns_to_go, """timesteps""": timesteps, """attention_mask""": attention_mask, } return config, inputs_dict @require_torch class UpperCamelCase__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' __snake_case : Tuple = (DecisionTransformerModel,) if is_torch_available() else () __snake_case : Dict = () __snake_case : List[Any] = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {} # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids __snake_case : int = False # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features __snake_case : Optional[Any] = False __snake_case : Optional[Any] = False __snake_case : int = False __snake_case : str = False __snake_case : Union[str, Any] = False __snake_case : Union[str, Any] = False __snake_case : List[Any] = False __snake_case : Optional[int] = False __snake_case : List[Any] = False def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = DecisionTransformerModelTester(self ) SCREAMING_SNAKE_CASE = ConfigTester(self ,config_class=lowerCamelCase__ ,hidden_size=37 ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict: '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE__ ( self : int ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) @slow def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE = DecisionTransformerModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE = [ """states""", """actions""", """rewards""", """returns_to_go""", """timesteps""", """attention_mask""", ] self.assertListEqual(arg_names[: len(lowerCamelCase__ )] ,lowerCamelCase__ ) @require_torch class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = 2 # number of steps of autoregressive prediction we will perform SCREAMING_SNAKE_CASE = 10 # defined by the RL environment, may be normalized SCREAMING_SNAKE_CASE = DecisionTransformerModel.from_pretrained("""edbeeching/decision-transformer-gym-hopper-expert""" ) SCREAMING_SNAKE_CASE = model.to(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = model.config torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = torch.randn(1 ,1 ,config.state_dim ).to(device=lowerCamelCase__ ,dtype=torch.floataa ) # env.reset() SCREAMING_SNAKE_CASE = torch.tensor( [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]] ,device=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = 
torch.tensor(lowerCamelCase__ ,device=lowerCamelCase__ ,dtype=torch.floataa ).reshape(1 ,1 ,1 ) SCREAMING_SNAKE_CASE = state SCREAMING_SNAKE_CASE = torch.zeros(1 ,0 ,config.act_dim ,device=lowerCamelCase__ ,dtype=torch.floataa ) SCREAMING_SNAKE_CASE = torch.zeros(1 ,0 ,device=lowerCamelCase__ ,dtype=torch.floataa ) SCREAMING_SNAKE_CASE = torch.tensor(0 ,device=lowerCamelCase__ ,dtype=torch.long ).reshape(1 ,1 ) for step in range(lowerCamelCase__ ): SCREAMING_SNAKE_CASE = torch.cat([actions, torch.zeros(1 ,1 ,config.act_dim ,device=lowerCamelCase__ )] ,dim=1 ) SCREAMING_SNAKE_CASE = torch.cat([rewards, torch.zeros(1 ,1 ,device=lowerCamelCase__ )] ,dim=1 ) SCREAMING_SNAKE_CASE = torch.ones(1 ,states.shape[1] ).to(dtype=torch.long ,device=states.device ) with torch.no_grad(): SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = model( states=lowerCamelCase__ ,actions=lowerCamelCase__ ,rewards=lowerCamelCase__ ,returns_to_go=lowerCamelCase__ ,timesteps=lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,return_dict=lowerCamelCase__ ,) self.assertEqual(action_pred.shape ,actions.shape ) self.assertTrue(torch.allclose(action_pred[0, -1] ,expected_outputs[step] ,atol=1e-4 ) ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = ( # env.step(action) torch.randn(1 ,1 ,config.state_dim ).to(device=lowerCamelCase__ ,dtype=torch.floataa ), 1.0, False, {}, ) SCREAMING_SNAKE_CASE = action_pred[0, -1] SCREAMING_SNAKE_CASE = torch.cat([states, state] ,dim=1 ) SCREAMING_SNAKE_CASE = returns_to_go[0, -1] - reward SCREAMING_SNAKE_CASE = torch.cat([returns_to_go, pred_return.reshape(1 ,1 ,1 )] ,dim=1 ) SCREAMING_SNAKE_CASE = torch.cat( [timesteps, torch.ones((1, 1) ,device=lowerCamelCase__ ,dtype=torch.long ) * (step + 1)] ,dim=1 )
116
0
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, is_batched, to_numpy_array, valid_images, ) from ...utils import TensorType, logging snake_case_ : Dict = logging.get_logger(__name__) class lowercase__ ( lowercase ): lowercase__ = ["""pixel_values"""] def __init__( self : str ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Optional[Dict[str, int]] = None ,lowerCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Union[int, float] = 1 / 255 ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Optional[Union[float, List[float]]] = None ,lowerCamelCase__ : Optional[Union[float, List[float]]] = None ,**lowerCamelCase__ : Union[str, Any] ,): '''simple docstring''' super().__init__(**lowerCamelCase__ ) _UpperCamelCase : Optional[Any] = size if size is not None else {'height': 224, 'width': 224} _UpperCamelCase : Dict = get_size_dict(lowerCamelCase__ ) _UpperCamelCase : Optional[Any] = crop_size if crop_size is not None else {'height': 224, 'width': 224} _UpperCamelCase : Dict = get_size_dict(lowerCamelCase__ ,default_to_square=lowerCamelCase__ ,param_name='crop_size' ) _UpperCamelCase : List[str] = do_resize _UpperCamelCase : Any = do_rescale _UpperCamelCase : List[str] = do_normalize _UpperCamelCase : Dict = do_center_crop _UpperCamelCase : str = crop_size _UpperCamelCase : List[Any] = size _UpperCamelCase : List[Any] = resample _UpperCamelCase : Optional[int] = rescale_factor _UpperCamelCase : Optional[int] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN _UpperCamelCase : str = image_std if image_std is not None else IMAGENET_DEFAULT_STD def UpperCamelCase_ ( self : Any ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Dict[str, int] ,lowerCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : Dict ,): '''simple docstring''' _UpperCamelCase : List[str] = get_size_dict(lowerCamelCase__ ) if "shortest_edge" in size: _UpperCamelCase : Any = get_resize_output_image_size(lowerCamelCase__ ,size=size['shortest_edge'] ,default_to_square=lowerCamelCase__ ) # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: _UpperCamelCase : int = (size['height'], size['width']) else: raise ValueError(F'Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}' ) return resize(lowerCamelCase__ ,size=lowerCamelCase__ ,resample=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ ) def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Dict[str, int] ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : List[Any] ,): '''simple docstring''' _UpperCamelCase : str = get_size_dict(lowerCamelCase__ ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys (height, width). 
Got {size.keys()}' ) return center_crop(lowerCamelCase__ ,size=(size['height'], size['width']) ,data_format=lowerCamelCase__ ,**lowerCamelCase__ ) def UpperCamelCase_ ( self : str ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : float ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : str ): '''simple docstring''' return rescale(lowerCamelCase__ ,scale=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ ) def UpperCamelCase_ ( self : Tuple ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Union[float, List[float]] ,lowerCamelCase__ : Union[float, List[float]] ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : int ,): '''simple docstring''' return normalize(lowerCamelCase__ ,mean=lowerCamelCase__ ,std=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ ) def UpperCamelCase_ ( self : str ,lowerCamelCase__ : ImageInput ,lowerCamelCase__ : Optional[bool] = None ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : PILImageResampling = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : int = None ,lowerCamelCase__ : Optional[bool] = None ,lowerCamelCase__ : Optional[float] = None ,lowerCamelCase__ : Optional[bool] = None ,lowerCamelCase__ : Optional[Union[float, List[float]]] = None ,lowerCamelCase__ : Optional[Union[float, List[float]]] = None ,lowerCamelCase__ : Optional[Union[str, TensorType]] = None ,lowerCamelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST ,**lowerCamelCase__ : str ,): '''simple docstring''' _UpperCamelCase : Union[str, Any] = do_resize if do_resize is not None else self.do_resize _UpperCamelCase : List[str] = do_rescale if do_rescale is not None else self.do_rescale _UpperCamelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize _UpperCamelCase : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop _UpperCamelCase : Any = crop_size if crop_size is not None else self.crop_size _UpperCamelCase : Optional[int] = get_size_dict(lowerCamelCase__ ,param_name='crop_size' ,default_to_square=lowerCamelCase__ ) _UpperCamelCase : Optional[int] = resample if resample is not None else self.resample _UpperCamelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor _UpperCamelCase : str = image_mean if image_mean is not None else self.image_mean _UpperCamelCase : Optional[int] = image_std if image_std is not None else self.image_std _UpperCamelCase : Optional[int] = size if size is not None else self.size _UpperCamelCase : Dict = get_size_dict(lowerCamelCase__ ) if not is_batched(lowerCamelCase__ ): _UpperCamelCase : Any = [images] if not valid_images(lowerCamelCase__ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) # All transformations expect numpy arrays. 
_UpperCamelCase : Optional[Any] = [to_numpy_array(lowerCamelCase__ ) for image in images] if do_resize: _UpperCamelCase : List[str] = [self.resize(image=lowerCamelCase__ ,size=lowerCamelCase__ ,resample=lowerCamelCase__ ) for image in images] if do_center_crop: _UpperCamelCase : Optional[Any] = [self.center_crop(image=lowerCamelCase__ ,size=lowerCamelCase__ ) for image in images] if do_rescale: _UpperCamelCase : Union[str, Any] = [self.rescale(image=lowerCamelCase__ ,scale=lowerCamelCase__ ) for image in images] if do_normalize: _UpperCamelCase : Tuple = [self.normalize(image=lowerCamelCase__ ,mean=lowerCamelCase__ ,std=lowerCamelCase__ ) for image in images] _UpperCamelCase : Dict = [to_channel_dimension_format(lowerCamelCase__ ,lowerCamelCase__ ) for image in images] _UpperCamelCase : Dict = {'pixel_values': images} return BatchFeature(data=lowerCamelCase__ ,tensor_type=lowerCamelCase__ )
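A minimal preprocessing sketch; MyImageProcessor is a hypothetical alias for the masked class above, and the call signature follows the standard BaseImageProcessor API (an assumption in this dump):

import numpy as np

processor = MyImageProcessor()  # hypothetical name for the class defined above
image = (np.random.rand(256, 256, 3) * 255).astype(np.uint8)
batch = processor(images=image, return_tensors="np")
# default pipeline: resize to 224x224, center crop, rescale by 1/255, normalize, channels first
print(batch["pixel_values"].shape)  # expected (1, 3, 224, 224)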
195
"""Lazy import structure for the LayoutLMv2 model."""

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
195
1
"""Combine per-column min-max normalized scores into one weighted score per row."""


def get_data(source_data: list[list[float]]) -> list[list[float]]:
    # transpose rows of raw data into per-column lists of floats
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0, the score is 1 - normalized value (a lower raw value is better)
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    # element-wise sum across all per-column score lists
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    # function names above are descriptive choices for this dump; the originals are masked
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
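A small worked example: each row holds (price, rating); price is minimized (weight 0) and rating maximized (weight 1), and the combined score is appended to each row:

vehicles = [[20.0, 60.0], [23.0, 90.0], [22.0, 50.0]]
scored = procentual_proximity(vehicles, [0, 1])
# price normalizes to [1.0, 0.0, 1/3] and rating to [0.25, 1.0, 0.0],
# so the appended totals are [1.25, 1.0, 0.333...]
print(scored)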
704
"""Lazy import structure for the ONNX export utilities."""

from typing import TYPE_CHECKING

from ..utils import _LazyModule


_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}

if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
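With the lazy structure above, importing the package stays cheap and submodules load on first attribute access; a sketch, assuming this file lives at transformers/onnx/__init__.py:

from transformers import onnx

# The attribute access below is what triggers the real import of .config via _LazyModule.
print(onnx.OnnxConfig)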
7
0
import os import re import shutil import sys import tempfile import unittest import black lowerCamelCase :List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated. lowerCamelCase :Any = ' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n' class UpperCAmelCase ( unittest.TestCase ): def _A ( self: Optional[Any] ): _a = tempfile.mkdtemp() os.makedirs(os.path.join(self.transformer_dir , '''models/bert/''' ) ) _a = self.transformer_dir shutil.copy( os.path.join(__UpperCamelCase , '''src/transformers/models/bert/modeling_bert.py''' ) , os.path.join(self.transformer_dir , '''models/bert/modeling_bert.py''' ) , ) def _A ( self: Optional[int] ): _a = '''src/transformers''' shutil.rmtree(self.transformer_dir ) def _A ( self: Optional[Any] , __UpperCamelCase: str , __UpperCamelCase: Dict , __UpperCamelCase: Union[str, Any] , __UpperCamelCase: Optional[int]=None ): _a = comment + f"\nclass {class_name}(nn.Module):\n" + class_code if overwrite_result is not None: _a = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result _a = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 ) _a = black.format_str(__UpperCamelCase , mode=__UpperCamelCase ) _a = os.path.join(self.transformer_dir , '''new_code.py''' ) with open(__UpperCamelCase , '''w''' , newline='''\n''' ) as f: f.write(__UpperCamelCase ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(__UpperCamelCase ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=__UpperCamelCase ) with open(__UpperCamelCase , '''r''' ) as f: self.assertTrue(f.read() , __UpperCamelCase ) def _A ( self: List[Any] ): _a = check_copies.find_code_in_transformers('''models.bert.modeling_bert.BertLMPredictionHead''' ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def _A ( self: Optional[int] ): # Base copy consistency self.check_copy_consistency( '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , REFERENCE_CODE + '''\n''' , ) # With no empty line at the end self.check_copy_consistency( '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , __UpperCamelCase , ) # Copy consistency with rename self.check_copy_consistency( '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , re.sub('''Bert''' , '''TestModel''' , __UpperCamelCase ) , ) # Copy consistency with a really long name _a = '''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason''' self.check_copy_consistency( f"# Copied from 
transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}" , f"{long_class_name}LMPredictionHead" , re.sub('''Bert''' , __UpperCamelCase , __UpperCamelCase ) , ) # Copy consistency with overwrite self.check_copy_consistency( '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , __UpperCamelCase , overwrite_result=re.sub('''Bert''' , '''TestModel''' , __UpperCamelCase ) , ) def _A ( self: Any ): _a = check_copies.LOCALIZED_READMES['''README_zh-hans.md'''] _a = ( '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the''' ''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for''' ''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong''' ''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.''' ''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),''' ''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and''' ''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same''' ''' method has been applied to compress GPT2 into''' ''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into''' ''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),''' ''' Multilingual BERT into''' ''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German''' ''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**''' ''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders''' ''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang''' ''' Luong, Quoc V. Le, Christopher D. Manning.''' ) _a = ( '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the''' ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of''' ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian''' ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n''' ) _a = ( '''1. 
**[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the''' ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of''' ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian''' ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.''' ''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文''' ''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and''' ''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same''' ''' method has been applied to compress GPT2 into''' ''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into''' ''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),''' ''' Multilingual BERT into''' ''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German''' ''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自''' ''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather''' ''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,''' ''' Christopher D. Manning 发布。\n''' ) _a , _a = check_copies.convert_to_localized_md( __UpperCamelCase , __UpperCamelCase , localized_readme['''format_model_list'''] ) self.assertFalse(__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) _a , _a = check_copies.convert_to_localized_md( __UpperCamelCase , __UpperCamelCase , localized_readme['''format_model_list'''] ) # Check whether the number of models is equal to README.md after conversion. self.assertTrue(__UpperCamelCase ) _a = ( '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the''' ''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for''' ''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong''' ''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.''' ) _a = ( '''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and''' ''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of''' ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian''' ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n''' ) _a = ( '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the''' ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of''' ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian''' ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n''' ) _a , _a = check_copies.convert_to_localized_md( __UpperCamelCase , __UpperCamelCase , localized_readme['''format_model_list'''] ) # Check if the model link is synchronized. self.assertEqual(__UpperCamelCase , __UpperCamelCase )
487
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCamelCase :List[str] = logging.get_logger(__name__) lowerCamelCase :Any = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } lowerCamelCase :Tuple = { 'vocab_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json' }, 'merges_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt' }, 'tokenizer_config_file': { 'facebook/blenderbot_small-90M': ( 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json' ) }, } lowerCamelCase :Tuple = {'facebook/blenderbot_small-90M': 512} def __snake_case ( _UpperCamelCase ) -> Any: _a = set() _a = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _a = char _a = set(_UpperCamelCase ) return pairs class UpperCAmelCase ( __snake_case ): a: Any = VOCAB_FILES_NAMES a: Any = PRETRAINED_VOCAB_FILES_MAP a: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a: int = ["input_ids", "attention_mask"] def __init__( self: str , __UpperCamelCase: List[str] , __UpperCamelCase: Tuple , __UpperCamelCase: Union[str, Any]="__start__" , __UpperCamelCase: int="__end__" , __UpperCamelCase: Optional[int]="__unk__" , __UpperCamelCase: int="__null__" , **__UpperCamelCase: str , ): super().__init__(unk_token=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , pad_token=__UpperCamelCase , **__UpperCamelCase ) with open(__UpperCamelCase , encoding='''utf-8''' ) as vocab_handle: _a = json.load(__UpperCamelCase ) _a = {v: k for k, v in self.encoder.items()} with open(__UpperCamelCase , encoding='''utf-8''' ) as merges_handle: _a = merges_handle.read().split('''\n''' )[1:-1] _a = [tuple(merge.split() ) for merge in merges] _a = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) ) _a = {} @property def _A ( self: List[Any] ): return len(self.encoder ) def _A ( self: Any ): return dict(self.encoder , **self.added_tokens_encoder ) def _A ( self: Optional[int] , __UpperCamelCase: str ): if token in self.cache: return self.cache[token] _a = re.sub('''([.,!?()])''' , R''' \1''' , __UpperCamelCase ) _a = re.sub('''(\')''' , R''' \1 ''' , __UpperCamelCase ) _a = re.sub(R'''\s{2,}''' , ''' ''' , __UpperCamelCase ) if "\n" in token: _a = token.replace('''\n''' , ''' __newln__''' ) _a = token.split(''' ''' ) _a = [] for token in tokens: if not len(__UpperCamelCase ): continue _a = token.lower() _a = tuple(__UpperCamelCase ) _a = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) _a = get_pairs(__UpperCamelCase ) if not pairs: words.append(__UpperCamelCase ) continue while True: _a = min(__UpperCamelCase , key=lambda __UpperCamelCase : self.bpe_ranks.get(__UpperCamelCase , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break _a , _a = bigram _a = [] _a = 0 while i < len(__UpperCamelCase ): try: _a = word.index(__UpperCamelCase , __UpperCamelCase ) new_word.extend(word[i:j] ) _a = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(__UpperCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _a = tuple(__UpperCamelCase ) _a = new_word if len(__UpperCamelCase ) == 1: break else: _a = get_pairs(__UpperCamelCase ) _a = '''@@ '''.join(__UpperCamelCase ) _a = word[:-4] 
_a = word words.append(__UpperCamelCase ) return " ".join(__UpperCamelCase ) def _A ( self: str , __UpperCamelCase: str ): _a = [] _a = re.findall(R'''\S+\n?''' , __UpperCamelCase ) for token in words: split_tokens.extend(list(self.bpe(__UpperCamelCase ).split(''' ''' ) ) ) return split_tokens def _A ( self: Optional[int] , __UpperCamelCase: str ): _a = token.lower() return self.encoder.get(__UpperCamelCase , self.encoder.get(self.unk_token ) ) def _A ( self: List[Any] , __UpperCamelCase: int ): return self.decoder.get(__UpperCamelCase , self.unk_token ) def _A ( self: Any , __UpperCamelCase: List[str] ): _a = ''' '''.join(__UpperCamelCase ).replace('''@@ ''' , '''''' ).strip() return out_string def _A ( self: int , __UpperCamelCase: str , __UpperCamelCase: Optional[str] = None ): if not os.path.isdir(__UpperCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return _a = os.path.join( __UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) _a = os.path.join( __UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(__UpperCamelCase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__UpperCamelCase , ensure_ascii=__UpperCamelCase ) + '''\n''' ) _a = 0 with open(__UpperCamelCase , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __UpperCamelCase : kv[1] ): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." ''' Please check that the tokenizer is not corrupted!''' ) _a = token_index writer.write(''' '''.join(__UpperCamelCase ) + '''\n''' ) index += 1 return vocab_file, merge_file
487
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Only register the torch-backed symbols when torch is importable.
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ibert import (
            IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            IBertForMaskedLM,
            IBertForMultipleChoice,
            IBertForQuestionAnswering,
            IBertForSequenceClassification,
            IBertForTokenClassification,
            IBertModel,
            IBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so the heavy imports above only run on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
709
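The `sys.modules` swap above means torch-dependent classes are only imported when first touched. A minimal sketch of what that buys (a hedged illustration, not part of the original file):

# Importing the config class loads only the lightweight configuration module;
# the torch-backed modeling module stays unloaded until one of its names is accessed.
from transformers.models.ibert import IBertConfig

config = IBertConfig()
print(config.model_type)  # expected: "ibert"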
'''simple docstring''' from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __lowerCAmelCase( lowerCAmelCase__ ): __snake_case : List[str] = ['image_processor', 'tokenizer'] __snake_case : Optional[int] = 'Pix2StructImageProcessor' __snake_case : Optional[int] = ('T5Tokenizer', 'T5TokenizerFast') def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ :Optional[Any] = False super().__init__(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __call__( self : Tuple , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Union[bool, str, PaddingStrategy] = False , SCREAMING_SNAKE_CASE : Union[bool, str, TruncationStrategy] = None , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[int] = 2_048 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , **SCREAMING_SNAKE_CASE : str , ): """simple docstring""" if images is None and text is None: raise ValueError('You have to specify either images or text.' ) # Get only text if images is None and not self.image_processor.is_vqa: SCREAMING_SNAKE_CASE_ :Union[str, Any] = self.tokenizer SCREAMING_SNAKE_CASE_ :Tuple = self.tokenizer( text=SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , stride=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , return_overflowing_tokens=SCREAMING_SNAKE_CASE , return_special_tokens_mask=SCREAMING_SNAKE_CASE , return_offsets_mapping=SCREAMING_SNAKE_CASE , return_token_type_ids=SCREAMING_SNAKE_CASE , return_length=SCREAMING_SNAKE_CASE , verbose=SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) return text_encoding if not self.image_processor.is_vqa: # add pixel_values SCREAMING_SNAKE_CASE_ :Any = self.image_processor( SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , max_patches=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) else: # add pixel_values and bbox SCREAMING_SNAKE_CASE_ :Union[str, Any] = self.image_processor( SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , max_patches=SCREAMING_SNAKE_CASE , header_text=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) if text is not None and not self.image_processor.is_vqa: SCREAMING_SNAKE_CASE_ :Any = self.tokenizer( text=SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , stride=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , return_overflowing_tokens=SCREAMING_SNAKE_CASE , return_special_tokens_mask=SCREAMING_SNAKE_CASE , 
return_offsets_mapping=SCREAMING_SNAKE_CASE , return_token_type_ids=SCREAMING_SNAKE_CASE , return_length=SCREAMING_SNAKE_CASE , verbose=SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) if "attention_mask" in text_encoding: SCREAMING_SNAKE_CASE_ :List[Any] = text_encoding.pop('attention_mask' ) if "input_ids" in text_encoding: SCREAMING_SNAKE_CASE_ :Any = text_encoding.pop('input_ids' ) else: SCREAMING_SNAKE_CASE_ :Any = None if text_encoding is not None: encoding_image_processor.update(SCREAMING_SNAKE_CASE ) return encoding_image_processor def _lowercase ( self : Tuple , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : List[str] ): """simple docstring""" return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def _lowercase ( self : Tuple , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" return self.tokenizer.decode(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) @property def _lowercase ( self : Dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ :Tuple = self.tokenizer.model_input_names SCREAMING_SNAKE_CASE_ :Optional[int] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
233
0
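For orientation, a hedged usage sketch of the processor defined above, via the public `Pix2StructProcessor` name (the checkpoint id and image URL are illustrative assumptions):

import requests
from PIL import Image
from transformers import Pix2StructProcessor

# Assumed checkpoint name, for illustration only.
processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
# Non-VQA path: the image processor yields flattened patches; text, if given, is tokenized separately.
inputs = processor(images=image, text="A picture of", return_tensors="pt")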
import base64


def ascii85_encode(string: str) -> bytes:
    """Encode a UTF-8 string with Ascii85."""
    return base64.a85encode(string.encode("utf-8"))


def ascii85_decode(a85encoded: bytes) -> str:
    """Decode Ascii85-encoded bytes back to a UTF-8 string."""
    return base64.a85decode(a85encoded).decode("utf-8")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
445
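A quick round-trip check for the Ascii85 helpers above (names follow the corrected version):

payload = "Hello, Ascii85!"
encoded = ascii85_encode(payload)   # bytes over the Ascii85 alphabet
assert ascii85_decode(encoded) == payload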
'''simple docstring''' import math from enum import Enum from typing import Optional, Union from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from .utils import logging __SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__) class lowerCamelCase_ (snake_case__ ): '''simple docstring''' __UpperCamelCase: Dict = "linear" __UpperCamelCase: Tuple = "cosine" __UpperCamelCase: Optional[int] = "cosine_with_restarts" __UpperCamelCase: str = "polynomial" __UpperCamelCase: int = "constant" __UpperCamelCase: Any = "constant_with_warmup" __UpperCamelCase: Optional[Any] = "piecewise_constant" def UpperCamelCase_ ( _UpperCAmelCase : Optimizer , _UpperCAmelCase : int = -1 ) -> Any: """simple docstring""" return LambdaLR(_UpperCAmelCase , lambda _UpperCAmelCase : 1 , last_epoch=_UpperCAmelCase ) def UpperCamelCase_ ( _UpperCAmelCase : Optimizer , _UpperCAmelCase : int , _UpperCAmelCase : int = -1 ) -> Optional[int]: """simple docstring""" def lr_lambda(_UpperCAmelCase : int ): if current_step < num_warmup_steps: return float(_UpperCAmelCase ) / float(max(1.0 , _UpperCAmelCase ) ) return 1.0 return LambdaLR(_UpperCAmelCase , _UpperCAmelCase , last_epoch=_UpperCAmelCase ) def UpperCamelCase_ ( _UpperCAmelCase : Optimizer , _UpperCAmelCase : str , _UpperCAmelCase : int = -1 ) -> str: """simple docstring""" _UpperCAmelCase : Optional[int] = {} _UpperCAmelCase : Union[str, Any] = step_rules.split("," ) for rule_str in rule_list[:-1]: _UpperCAmelCase , _UpperCAmelCase : Tuple = rule_str.split(":" ) _UpperCAmelCase : Dict = int(_UpperCAmelCase ) _UpperCAmelCase : int = float(_UpperCAmelCase ) _UpperCAmelCase : Dict = value _UpperCAmelCase : List[str] = float(rule_list[-1] ) def create_rules_function(_UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ): def rule_func(_UpperCAmelCase : int ) -> float: _UpperCAmelCase : Union[str, Any] = sorted(rules_dict.keys() ) for i, sorted_step in enumerate(_UpperCAmelCase ): if steps < sorted_step: return rules_dict[sorted_steps[i]] return last_lr_multiple return rule_func _UpperCAmelCase : Optional[Any] = create_rules_function(_UpperCAmelCase , _UpperCAmelCase ) return LambdaLR(_UpperCAmelCase , _UpperCAmelCase , last_epoch=_UpperCAmelCase ) def UpperCamelCase_ ( _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any]=-1 ) -> Optional[int]: """simple docstring""" def lr_lambda(_UpperCAmelCase : int ): if current_step < num_warmup_steps: return float(_UpperCAmelCase ) / float(max(1 , _UpperCAmelCase ) ) return max( 0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) ) return LambdaLR(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def UpperCamelCase_ ( _UpperCAmelCase : Optimizer , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : float = 0.5 , _UpperCAmelCase : int = -1 ) -> Any: """simple docstring""" def lr_lambda(_UpperCAmelCase : List[Any] ): if current_step < num_warmup_steps: return float(_UpperCAmelCase ) / float(max(1 , _UpperCAmelCase ) ) _UpperCAmelCase : Union[str, Any] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_UpperCAmelCase ) * 2.0 * progress )) ) return LambdaLR(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def UpperCamelCase_ ( _UpperCAmelCase : Optimizer , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = -1 ) -> int: 
"""simple docstring""" def lr_lambda(_UpperCAmelCase : int ): if current_step < num_warmup_steps: return float(_UpperCAmelCase ) / float(max(1 , _UpperCAmelCase ) ) _UpperCAmelCase : Dict = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) if progress >= 1.0: return 0.0 return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_UpperCAmelCase ) * progress) % 1.0) )) ) return LambdaLR(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def UpperCamelCase_ ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any]=1e-7 , _UpperCAmelCase : Dict=1.0 , _UpperCAmelCase : List[Any]=-1 ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : Any = optimizer.defaults["lr"] if not (lr_init > lr_end): raise ValueError(F"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" ) def lr_lambda(_UpperCAmelCase : int ): if current_step < num_warmup_steps: return float(_UpperCAmelCase ) / float(max(1 , _UpperCAmelCase ) ) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: _UpperCAmelCase : int = lr_init - lr_end _UpperCAmelCase : Optional[Any] = num_training_steps - num_warmup_steps _UpperCAmelCase : int = 1 - (current_step - num_warmup_steps) / decay_steps _UpperCAmelCase : Optional[int] = lr_range * pct_remaining**power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) __SCREAMING_SNAKE_CASE : int = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, } def UpperCamelCase_ ( _UpperCAmelCase : Union[str, SchedulerType] , _UpperCAmelCase : Optimizer , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : int = 1 , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : int = -1 , ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : List[str] = SchedulerType(_UpperCAmelCase ) _UpperCAmelCase : str = TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(_UpperCAmelCase , last_epoch=_UpperCAmelCase ) if name == SchedulerType.PIECEWISE_CONSTANT: return schedule_func(_UpperCAmelCase , step_rules=_UpperCAmelCase , last_epoch=_UpperCAmelCase ) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" ) if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(_UpperCAmelCase , num_warmup_steps=_UpperCAmelCase , last_epoch=_UpperCAmelCase ) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" ) if name == SchedulerType.COSINE_WITH_RESTARTS: return schedule_func( _UpperCAmelCase , num_warmup_steps=_UpperCAmelCase , num_training_steps=_UpperCAmelCase , num_cycles=_UpperCAmelCase , last_epoch=_UpperCAmelCase , ) if name == SchedulerType.POLYNOMIAL: return schedule_func( _UpperCAmelCase , 
num_warmup_steps=_UpperCAmelCase , num_training_steps=_UpperCAmelCase , power=_UpperCAmelCase , last_epoch=_UpperCAmelCase , ) return schedule_func( _UpperCAmelCase , num_warmup_steps=_UpperCAmelCase , num_training_steps=_UpperCAmelCase , last_epoch=_UpperCAmelCase )
244
0
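To make the warmup arithmetic concrete, here is a minimal self-contained sketch of the linear warmup-then-decay schedule built directly on `LambdaLR` (the model, optimizer, and step counts are illustrative assumptions):

import torch
from torch.optim.lr_scheduler import LambdaLR

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
num_warmup_steps, num_training_steps = 10, 100

def lr_lambda(current_step: int) -> float:
    # Linear ramp from 0 up to the base LR, then linear decay back to 0.
    if current_step < num_warmup_steps:
        return current_step / max(1, num_warmup_steps)
    return max(0.0, (num_training_steps - current_step) / max(1, num_training_steps - num_warmup_steps))

scheduler = LambdaLR(optimizer, lr_lambda)
for _ in range(num_training_steps):
    optimizer.step()
    scheduler.step()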
def hamming(n_element: int) -> list:
    """Return the first n_element Hamming numbers (numbers of the form 2^i * 3^j * 5^k)."""
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        # Advance each pointer past candidates that are no larger than the last element.
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
700
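A sanity check on the three-pointer merge above (values verified by hand):

assert hamming(5) == [1, 2, 3, 4, 5]
# 6 = 2*3 and 8 = 2**3 follow; 7 is skipped because it has a prime factor other than 2, 3, 5.
assert hamming(8) == [1, 2, 3, 4, 5, 6, 8, 9]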
import warnings

from ...utils import logging
from .image_processing_deit import DeiTImageProcessor


logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
146
0
import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import evaluate import numpy as np from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt') UpperCamelCase = logging.getLogger(__name__) @dataclass class __lowerCamelCase : """simple docstring""" snake_case__ = field( default=1_2_8 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) snake_case__ = field( default=UpperCamelCase__ , metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) snake_case__ = field( default=UpperCamelCase__ , metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) } , ) snake_case__ = field( default=UpperCamelCase__ , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) snake_case__ = field( default=UpperCamelCase__ , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) snake_case__ = field( default=UpperCamelCase__ , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) } , ) @dataclass class __lowerCamelCase : """simple docstring""" snake_case__ = field( default=UpperCamelCase__ , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) snake_case__ = field( default=UpperCamelCase__ , metadata={"help": "Evaluation language. 
Also train language if `train_language` is set to None."} ) snake_case__ = field( default=UpperCamelCase__ , metadata={"help": "Train language if it is different from the evaluation language."} ) snake_case__ = field( default=UpperCamelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) snake_case__ = field( default=UpperCamelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) snake_case__ = field( default=UpperCamelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) snake_case__ = field( default=UpperCamelCase__ , metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"} , ) snake_case__ = field( default=UpperCamelCase__ , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , ) snake_case__ = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) snake_case__ = field( default=UpperCamelCase__ , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) snake_case__ = field( default=UpperCamelCase__ , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , ) def _A ( ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_xnli" , lowerCAmelCase_ ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() lowerCAmelCase__ = training_args.get_process_log_level() logger.setLevel(lowerCAmelCase_ ) datasets.utils.logging.set_verbosity(lowerCAmelCase_ ) transformers.utils.logging.set_verbosity(lowerCAmelCase_ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(F'Training/evaluation parameters {training_args}' ) # Detecting last checkpoint. lowerCAmelCase__ = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: lowerCAmelCase__ = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. ' "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None: logger.info( F'Checkpoint detected, resuming training at {last_checkpoint}. 
To avoid this behavior, change ' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed ) # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. # Downloading and loading xnli dataset from the hub. if training_args.do_train: if model_args.train_language is None: lowerCAmelCase__ = load_dataset( "xnli" , model_args.language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: lowerCAmelCase__ = load_dataset( "xnli" , model_args.train_language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) lowerCAmelCase__ = train_dataset.features["label"].names if training_args.do_eval: lowerCAmelCase__ = load_dataset( "xnli" , model_args.language , split="validation" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) lowerCAmelCase__ = eval_dataset.features["label"].names if training_args.do_predict: lowerCAmelCase__ = load_dataset( "xnli" , model_args.language , split="test" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) lowerCAmelCase__ = predict_dataset.features["label"].names # Labels lowerCAmelCase__ = len(lowerCAmelCase_ ) # Load pretrained model and tokenizer # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. lowerCAmelCase__ = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCAmelCase_ , idalabel={str(lowerCAmelCase_ ): label for i, label in enumerate(lowerCAmelCase_ )} , labelaid={label: i for i, label in enumerate(lowerCAmelCase_ )} , finetuning_task="xnli" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) lowerCAmelCase__ = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) lowerCAmelCase__ = AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowerCAmelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # Preprocessing the datasets # Padding strategy if data_args.pad_to_max_length: lowerCAmelCase__ = "max_length" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch lowerCAmelCase__ = False def preprocess_function(lowerCAmelCase_ : int ): # Tokenize the texts return tokenizer( examples["premise"] , examples["hypothesis"] , padding=lowerCAmelCase_ , max_length=data_args.max_seq_length , truncation=lowerCAmelCase_ , ) if training_args.do_train: if data_args.max_train_samples is not None: lowerCAmelCase__ = min(len(lowerCAmelCase_ ) , data_args.max_train_samples ) lowerCAmelCase__ = train_dataset.select(range(lowerCAmelCase_ ) ) with training_args.main_process_first(desc="train dataset map 
pre-processing" ): lowerCAmelCase__ = train_dataset.map( lowerCAmelCase_ , batched=lowerCAmelCase_ , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on train dataset" , ) # Log a few random samples from the training set: for index in random.sample(range(len(lowerCAmelCase_ ) ) , 3 ): logger.info(F'Sample {index} of the training set: {train_dataset[index]}.' ) if training_args.do_eval: if data_args.max_eval_samples is not None: lowerCAmelCase__ = min(len(lowerCAmelCase_ ) , data_args.max_eval_samples ) lowerCAmelCase__ = eval_dataset.select(range(lowerCAmelCase_ ) ) with training_args.main_process_first(desc="validation dataset map pre-processing" ): lowerCAmelCase__ = eval_dataset.map( lowerCAmelCase_ , batched=lowerCAmelCase_ , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on validation dataset" , ) if training_args.do_predict: if data_args.max_predict_samples is not None: lowerCAmelCase__ = min(len(lowerCAmelCase_ ) , data_args.max_predict_samples ) lowerCAmelCase__ = predict_dataset.select(range(lowerCAmelCase_ ) ) with training_args.main_process_first(desc="prediction dataset map pre-processing" ): lowerCAmelCase__ = predict_dataset.map( lowerCAmelCase_ , batched=lowerCAmelCase_ , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on prediction dataset" , ) # Get the metric function lowerCAmelCase__ = evaluate.load("xnli" ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(lowerCAmelCase_ : EvalPrediction ): lowerCAmelCase__ = p.predictions[0] if isinstance(p.predictions , lowerCAmelCase_ ) else p.predictions lowerCAmelCase__ = np.argmax(lowerCAmelCase_ , axis=1 ) return metric.compute(predictions=lowerCAmelCase_ , references=p.label_ids ) # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. 
if data_args.pad_to_max_length: lowerCAmelCase__ = default_data_collator elif training_args.fpaa: lowerCAmelCase__ = DataCollatorWithPadding(lowerCAmelCase_ , pad_to_multiple_of=8 ) else: lowerCAmelCase__ = None # Initialize our Trainer lowerCAmelCase__ = Trainer( model=lowerCAmelCase_ , args=lowerCAmelCase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , data_collator=lowerCAmelCase_ , ) # Training if training_args.do_train: lowerCAmelCase__ = None if training_args.resume_from_checkpoint is not None: lowerCAmelCase__ = training_args.resume_from_checkpoint elif last_checkpoint is not None: lowerCAmelCase__ = last_checkpoint lowerCAmelCase__ = trainer.train(resume_from_checkpoint=lowerCAmelCase_ ) lowerCAmelCase__ = train_result.metrics lowerCAmelCase__ = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase_ ) ) lowerCAmelCase__ = min(lowerCAmelCase_ , len(lowerCAmelCase_ ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("train" , lowerCAmelCase_ ) trainer.save_metrics("train" , lowerCAmelCase_ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) lowerCAmelCase__ = trainer.evaluate(eval_dataset=lowerCAmelCase_ ) lowerCAmelCase__ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCAmelCase_ ) lowerCAmelCase__ = min(lowerCAmelCase_ , len(lowerCAmelCase_ ) ) trainer.log_metrics("eval" , lowerCAmelCase_ ) trainer.save_metrics("eval" , lowerCAmelCase_ ) # Prediction if training_args.do_predict: logger.info("*** Predict ***" ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = trainer.predict(lowerCAmelCase_ , metric_key_prefix="predict" ) lowerCAmelCase__ = ( data_args.max_predict_samples if data_args.max_predict_samples is not None else len(lowerCAmelCase_ ) ) lowerCAmelCase__ = min(lowerCAmelCase_ , len(lowerCAmelCase_ ) ) trainer.log_metrics("predict" , lowerCAmelCase_ ) trainer.save_metrics("predict" , lowerCAmelCase_ ) lowerCAmelCase__ = np.argmax(lowerCAmelCase_ , axis=1 ) lowerCAmelCase__ = os.path.join(training_args.output_dir , "predictions.txt" ) if trainer.is_world_process_zero(): with open(lowerCAmelCase_ , "w" ) as writer: writer.write("index\tprediction\n" ) for index, item in enumerate(lowerCAmelCase_ ): lowerCAmelCase__ = label_list[item] writer.write(F'{index}\t{item}\n' ) if __name__ == "__main__": main()
61
def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
471
0
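A worked example of the zero-padded OR above: 25 = 0b11001 and 32 = 0b100000, so after padding to a common width each output column is 1 whenever either input column is 1:

assert binary_or(25, 32) == "0b111001"  # 57 in decimal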
def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
705
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """Three fixed priority levels (0 is highest); FIFO within a level."""

    def __init__(self) -> None:
        self.queues: list[list[int]] = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        # Serve the highest-priority (lowest index) non-empty queue first.
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """The smallest element has the highest priority."""

    def __init__(self) -> None:
        self.queue: list[int] = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        data = min(self.queue)
        self.queue.remove(data)
        return data

    def __str__(self) -> str:
        return str(self.queue)


def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    for _ in range(5):
        print(fpq.dequeue())
    print(fpq)
    for _ in range(4):
        print(fpq.dequeue())


def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
    for value in (10, 70, 100, 1, 5, 7, 4, 64, 128):
        epq.enqueue(value)
    print(epq)
    for _ in range(9):
        print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
584
0
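Dequeue order for `FixedPriorityQueue` is FIFO within a level, lowest level number first; a minimal check:

fpq = FixedPriorityQueue()
fpq.enqueue(2, 1)    # lowest priority
fpq.enqueue(0, 10)   # highest priority
fpq.enqueue(0, 100)
assert [fpq.dequeue(), fpq.dequeue(), fpq.dequeue()] == [10, 100, 1]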
def solution(length: int = 50) -> int:
    """
    Count the ways to fill a row of the given length with blocks of minimum
    length 3, any two blocks separated by at least one empty square
    (Project Euler problem 114).
    """
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            # one more placement: the block flush against the right end
            ways_number[row_length] += 1

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
676
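Project Euler 114 states the seven-unit case explicitly, which makes a handy regression check for the recurrence above:

assert solution(7) == 17  # the worked example from the problem statement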
'''simple docstring''' import math from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ = logging.get_logger(__name__) lowercase__ = { '''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''', # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio } class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" snake_case = """data2vec-audio""" def __init__( self , UpperCAmelCase_=32 , UpperCAmelCase_=7_68 , UpperCAmelCase_=12 , UpperCAmelCase_=12 , UpperCAmelCase_=30_72 , UpperCAmelCase_="gelu" , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.02 , UpperCAmelCase_=1e-5 , UpperCAmelCase_="gelu" , UpperCAmelCase_=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , UpperCAmelCase_=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase_=(10, 3, 3, 3, 3, 2, 2) , UpperCAmelCase_=False , UpperCAmelCase_=16 , UpperCAmelCase_=19 , UpperCAmelCase_=5 , UpperCAmelCase_=0.05 , UpperCAmelCase_=10 , UpperCAmelCase_=2 , UpperCAmelCase_=0.0 , UpperCAmelCase_=10 , UpperCAmelCase_=0 , UpperCAmelCase_="sum" , UpperCAmelCase_=False , UpperCAmelCase_=False , UpperCAmelCase_=2_56 , UpperCAmelCase_=(5_12, 5_12, 5_12, 5_12, 15_00) , UpperCAmelCase_=(5, 3, 3, 1, 1) , UpperCAmelCase_=(1, 2, 3, 1, 1) , UpperCAmelCase_=5_12 , UpperCAmelCase_=0 , UpperCAmelCase_=1 , UpperCAmelCase_=2 , UpperCAmelCase_=False , UpperCAmelCase_=3 , UpperCAmelCase_=2 , UpperCAmelCase_=3 , UpperCAmelCase_=None , **UpperCAmelCase_ , ): super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ ) snake_case_ = hidden_size snake_case_ = feat_extract_activation snake_case_ = list(UpperCAmelCase_ ) snake_case_ = list(UpperCAmelCase_ ) snake_case_ = list(UpperCAmelCase_ ) snake_case_ = conv_bias snake_case_ = num_conv_pos_embeddings snake_case_ = num_conv_pos_embedding_groups snake_case_ = conv_pos_kernel_size snake_case_ = len(self.conv_dim ) snake_case_ = num_hidden_layers snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = num_attention_heads snake_case_ = hidden_dropout snake_case_ = attention_dropout snake_case_ = activation_dropout snake_case_ = feat_proj_dropout snake_case_ = final_dropout snake_case_ = layerdrop snake_case_ = layer_norm_eps snake_case_ = initializer_range snake_case_ = vocab_size snake_case_ = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 snake_case_ = mask_time_prob snake_case_ = mask_time_length snake_case_ = mask_time_min_masks snake_case_ = mask_feature_prob snake_case_ = mask_feature_length snake_case_ = mask_feature_min_masks # ctc loss snake_case_ = ctc_loss_reduction snake_case_ = ctc_zero_infinity # adapter snake_case_ = add_adapter snake_case_ = adapter_kernel_size snake_case_ = adapter_stride snake_case_ = num_adapter_layers snake_case_ = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. snake_case_ = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. snake_case_ = list(UpperCAmelCase_ ) snake_case_ = list(UpperCAmelCase_ ) snake_case_ = list(UpperCAmelCase_ ) snake_case_ = xvector_output_dim @property def _lowercase ( self ): return math.prod(self.conv_stride )
508
0
"""simple docstring""" from math import log from scipy.constants import Boltzmann, physical_constants A: Tuple = 3_0_0 # TEMPERATURE (unit = K) def _snake_case ( UpperCamelCase : float , UpperCamelCase : float , UpperCamelCase : float , ): if donor_conc <= 0: raise ValueError("""Donor concentration should be positive""" ) elif acceptor_conc <= 0: raise ValueError("""Acceptor concentration should be positive""" ) elif intrinsic_conc <= 0: raise ValueError("""Intrinsic concentration should be positive""" ) elif donor_conc <= intrinsic_conc: raise ValueError( """Donor concentration should be greater than intrinsic concentration""" ) elif acceptor_conc <= intrinsic_conc: raise ValueError( """Acceptor concentration should be greater than intrinsic concentration""" ) else: return ( Boltzmann * T * log((donor_conc * acceptor_conc) / intrinsic_conc**2 ) / physical_constants["electron volt"][0] ) if __name__ == "__main__": import doctest doctest.testmod()
711
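In compact form the returned quantity is V_bi = (kT/q) * ln(N_d * N_a / n_i**2). With silicon-like illustrative concentrations (assumed values, not a physical reference) the result lands near 0.83 V at T = 300 K:

# Illustrative silicon-like concentrations (assumed, in cm^-3; only their ratio matters here).
v_bi = builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1e10)
print(f"{v_bi:.2f} V")  # approximately 0.83 V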
"""simple docstring""" from __future__ import annotations from decimal import Decimal from numpy import array def _snake_case ( UpperCamelCase : list[list[float]] ): UpperCAmelCase : str = Decimal # Check if the provided matrix has 2 rows and 2 columns # since this implementation only works for 2x2 matrices if len(UpperCamelCase ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2: # Calculate the determinant of the matrix UpperCAmelCase : Optional[int] = float( d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) ) if determinant == 0: raise ValueError("""This matrix has no inverse.""" ) # Creates a copy of the matrix with swapped positions of the elements UpperCAmelCase : Optional[Any] = [[0.0, 0.0], [0.0, 0.0]] UpperCAmelCase , UpperCAmelCase : Any = matrix[1][1], matrix[0][0] UpperCAmelCase , UpperCAmelCase : Union[str, Any] = -matrix[1][0], -matrix[0][1] # Calculate the inverse of the matrix return [ [(float(d(UpperCamelCase ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix ] elif ( len(UpperCamelCase ) == 3 and len(matrix[0] ) == 3 and len(matrix[1] ) == 3 and len(matrix[2] ) == 3 ): # Calculate the determinant of the matrix using Sarrus rule UpperCAmelCase : str = float( ( (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] )) + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] )) + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] )) ) - ( (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] )) + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] )) + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] )) ) ) if determinant == 0: raise ValueError("""This matrix has no inverse.""" ) # Creating cofactor matrix UpperCAmelCase : Optional[int] = [ [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], ] UpperCAmelCase : str = (d(matrix[1][1] ) * d(matrix[2][2] )) - ( d(matrix[1][2] ) * d(matrix[2][1] ) ) UpperCAmelCase : List[str] = -( (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] )) ) UpperCAmelCase : Optional[int] = (d(matrix[1][0] ) * d(matrix[2][1] )) - ( d(matrix[1][1] ) * d(matrix[2][0] ) ) UpperCAmelCase : Optional[int] = -( (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] )) ) UpperCAmelCase : str = (d(matrix[0][0] ) * d(matrix[2][2] )) - ( d(matrix[0][2] ) * d(matrix[2][0] ) ) UpperCAmelCase : Union[str, Any] = -( (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] )) ) UpperCAmelCase : Dict = (d(matrix[0][1] ) * d(matrix[1][2] )) - ( d(matrix[0][2] ) * d(matrix[1][1] ) ) UpperCAmelCase : Optional[Any] = -( (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] )) ) UpperCAmelCase : str = (d(matrix[0][0] ) * d(matrix[1][1] )) - ( d(matrix[0][1] ) * d(matrix[1][0] ) ) # Transpose the cofactor matrix (Adjoint matrix) UpperCAmelCase : str = array(UpperCamelCase ) for i in range(3 ): for j in range(3 ): UpperCAmelCase : Any = cofactor_matrix[j][i] # Inverse of the matrix using the formula (1/determinant) * adjoint matrix UpperCAmelCase : Tuple = array(UpperCamelCase ) for i in range(3 ): for j in range(3 ): inverse_matrix[i][j] /= d(UpperCamelCase ) # Calculate the inverse of the matrix return [[float(d(UpperCamelCase ) ) or 0.0 for n in row] for row in inverse_matrix] raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""" )
359
0
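As a hand-check of the 2x2 branch above: for [[2, 5], [1, 3]] the determinant is 2*3 - 5*1 = 1, so the closed form (1/det) * [[d, -b], [-c, a]] gives the inverse directly:

# Hand-check of the 2x2 closed form used by the function above.
a, b, c, d = 2.0, 5.0, 1.0, 3.0
det = a * d - b * c  # = 1.0
inverse = [[d / det, -b / det], [-c / det, a / det]]
assert inverse == [[3.0, -5.0], [-1.0, 2.0]]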
"""simple docstring""" import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class lowercase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' UpperCamelCase = BarthezTokenizer UpperCamelCase = BarthezTokenizerFast UpperCamelCase = True UpperCamelCase = True def lowercase__ ( self : List[str] ) -> int: '''simple docstring''' super().setUp() UpperCAmelCase_ = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=_UpperCAmelCase ) UpperCAmelCase_ = tokenizer def lowercase__ ( self : List[Any] ) -> int: '''simple docstring''' UpperCAmelCase_ = "<pad>" UpperCAmelCase_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase ) def lowercase__ ( self : List[Any] ) -> Any: '''simple docstring''' UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(_UpperCAmelCase ) , 101122 ) def lowercase__ ( self : Any ) -> str: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 101122 ) @require_torch def lowercase__ ( self : Optional[int] ) -> List[str]: '''simple docstring''' UpperCAmelCase_ = ["A long paragraph for summarization.", "Another paragraph for summarization."] UpperCAmelCase_ = [0, 57, 3018, 70307, 91, 2] UpperCAmelCase_ = self.tokenizer( _UpperCAmelCase , max_length=len(_UpperCAmelCase ) , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors="pt" ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) UpperCAmelCase_ = batch.input_ids.tolist()[0] self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def lowercase__ ( self : Tuple ) -> Optional[int]: '''simple docstring''' if not self.test_rust_tokenizer: return UpperCAmelCase_ = self.get_tokenizer() UpperCAmelCase_ = self.get_rust_tokenizer() UpperCAmelCase_ = "I was born in 92000, and this is falsé." 
UpperCAmelCase_ = tokenizer.tokenize(_UpperCAmelCase ) UpperCAmelCase_ = rust_tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase_ = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) UpperCAmelCase_ = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase_ = self.get_rust_tokenizer() UpperCAmelCase_ = tokenizer.encode(_UpperCAmelCase ) UpperCAmelCase_ = rust_tokenizer.encode(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) @slow def lowercase__ ( self : Union[str, Any] ) -> int: '''simple docstring''' UpperCAmelCase_ = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. UpperCAmelCase_ = [ "Le transformeur est un modèle d'apprentissage profond introduit en 2017, " "utilisé principalement dans le domaine du traitement automatique des langues (TAL).", "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus " "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches " "telles que la traduction et la synthèse de texte.", ] self.tokenizer_integration_test_util( expected_encoding=_UpperCAmelCase , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=_UpperCAmelCase , )
82
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
398
0
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """Return True if some subset of arr sums to required_sum."""
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
384
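A classic instance for the subset-sum table above:

assert is_sum_subset([3, 34, 4, 12, 5, 2], 9) is True    # 4 + 5 hits the target
assert is_sum_subset([3, 34, 4, 12, 5, 2], 30) is False  # no subset sums to 30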
'''simple docstring''' import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() A_ = logging.get_logger(__name__) def _UpperCamelCase ( __UpperCamelCase ) -> Optional[int]: print('Loading config file...' ) def flatten_yaml_as_dict(__UpperCamelCase ,__UpperCamelCase="" ,__UpperCamelCase="." ): lowerCamelCase_ = [] for k, v in d.items(): lowerCamelCase_ = parent_key + sep + k if parent_key else k if isinstance(__UpperCamelCase ,collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(__UpperCamelCase ,__UpperCamelCase ,sep=__UpperCamelCase ).items() ) else: items.append((new_key, v) ) return dict(__UpperCamelCase ) lowerCamelCase_ = argparse.Namespace() with open(__UpperCamelCase ,'r' ) as yaml_file: try: lowerCamelCase_ = yaml.load(__UpperCamelCase ,Loader=yaml.FullLoader ) lowerCamelCase_ = flatten_yaml_as_dict(__UpperCamelCase ) for k, v in flat_cfg.items(): setattr(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) except yaml.YAMLError as exc: logger.error('Error while loading config file: {}. Error message: {}'.format(__UpperCamelCase ,str(__UpperCamelCase ) ) ) return config def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> str: lowerCamelCase_ = MobileViTVaConfig() lowerCamelCase_ = False # dataset if task_name.startswith('imagenet1k_' ): lowerCamelCase_ = 10_00 if int(task_name.strip().split('_' )[-1] ) == 3_84: lowerCamelCase_ = 3_84 else: lowerCamelCase_ = 2_56 lowerCamelCase_ = 'imagenet-1k-id2label.json' elif task_name.startswith('imagenet21k_to_1k_' ): lowerCamelCase_ = 2_10_00 if int(task_name.strip().split('_' )[-1] ) == 3_84: lowerCamelCase_ = 3_84 else: lowerCamelCase_ = 2_56 lowerCamelCase_ = 'imagenet-22k-id2label.json' elif task_name.startswith('ade20k_' ): lowerCamelCase_ = 1_51 lowerCamelCase_ = 5_12 lowerCamelCase_ = 'ade20k-id2label.json' lowerCamelCase_ = True elif task_name.startswith('voc_' ): lowerCamelCase_ = 21 lowerCamelCase_ = 5_12 lowerCamelCase_ = 'pascal-voc-id2label.json' lowerCamelCase_ = True # orig_config lowerCamelCase_ = load_orig_config_file(__UpperCamelCase ) assert getattr(__UpperCamelCase ,'model.classification.name' ,-1 ) == "mobilevit_v2", "Invalid model" lowerCamelCase_ = getattr(__UpperCamelCase ,'model.classification.mitv2.width_multiplier' ,1.0 ) assert ( getattr(__UpperCamelCase ,'model.classification.mitv2.attn_norm_layer' ,-1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" lowerCamelCase_ = getattr(__UpperCamelCase ,'model.classification.activation.name' ,'swish' ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: lowerCamelCase_ = getattr(__UpperCamelCase ,'model.segmentation.output_stride' ,16 ) if "_deeplabv3" in task_name: lowerCamelCase_ = getattr(__UpperCamelCase ,'model.segmentation.deeplabv3.aspp_rates' ,[12, 24, 36] ) lowerCamelCase_ = getattr(__UpperCamelCase ,'model.segmentation.deeplabv3.aspp_out_channels' ,5_12 ) lowerCamelCase_ = getattr(__UpperCamelCase ,'model.segmentation.deeplabv3.aspp_dropout' ,0.1 ) # id2label lowerCamelCase_ = 'huggingface/label-files' lowerCamelCase_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type='dataset' ) ,'r' ) ) 
lowerCamelCase_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()} lowerCamelCase_ = idalabel lowerCamelCase_ = {v: k for k, v in idalabel.items()} return config def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[int]: lowerCamelCase_ = dct.pop(__UpperCamelCase ) lowerCamelCase_ = val def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase=False ) -> int: if base_model: lowerCamelCase_ = '' else: lowerCamelCase_ = 'mobilevitv2.' lowerCamelCase_ = [] for k in state_dict.keys(): if k[:8] == "encoder.": lowerCamelCase_ = k[8:] else: lowerCamelCase_ = k if ".block." in k: lowerCamelCase_ = k_new.replace('.block.' ,'.' ) if ".conv." in k: lowerCamelCase_ = k_new.replace('.conv.' ,'.convolution.' ) if ".norm." in k: lowerCamelCase_ = k_new.replace('.norm.' ,'.normalization.' ) if "conv_1." in k: lowerCamelCase_ = k_new.replace('conv_1.' ,f'''{model_prefix}conv_stem.''' ) for i in [1, 2]: if f'''layer_{i}.''' in k: lowerCamelCase_ = k_new.replace(f'''layer_{i}.''' ,f'''{model_prefix}encoder.layer.{i-1}.layer.''' ) if ".exp_1x1." in k: lowerCamelCase_ = k_new.replace('.exp_1x1.' ,'.expand_1x1.' ) if ".red_1x1." in k: lowerCamelCase_ = k_new.replace('.red_1x1.' ,'.reduce_1x1.' ) for i in [3, 4, 5]: if f'''layer_{i}.0.''' in k: lowerCamelCase_ = k_new.replace(f'''layer_{i}.0.''' ,f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' ) if f'''layer_{i}.1.local_rep.0.''' in k: lowerCamelCase_ = k_new.replace(f'''layer_{i}.1.local_rep.0.''' ,f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' ) if f'''layer_{i}.1.local_rep.1.''' in k: lowerCamelCase_ = k_new.replace(f'''layer_{i}.1.local_rep.1.''' ,f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' ) for i in [3, 4, 5]: if i == 3: lowerCamelCase_ = [0, 1] elif i == 4: lowerCamelCase_ = [0, 1, 2, 3] elif i == 5: lowerCamelCase_ = [0, 1, 2] for j in j_in: if f'''layer_{i}.1.global_rep.{j}.''' in k: lowerCamelCase_ = k_new.replace( f'''layer_{i}.1.global_rep.{j}.''' ,f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' ) if f'''layer_{i}.1.global_rep.{j+1}.''' in k: lowerCamelCase_ = k_new.replace( f'''layer_{i}.1.global_rep.{j+1}.''' ,f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' ) if f'''layer_{i}.1.conv_proj.''' in k: lowerCamelCase_ = k_new.replace(f'''layer_{i}.1.conv_proj.''' ,f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' ) if "pre_norm_attn.0." in k: lowerCamelCase_ = k_new.replace('pre_norm_attn.0.' ,'layernorm_before.' ) if "pre_norm_attn.1." in k: lowerCamelCase_ = k_new.replace('pre_norm_attn.1.' ,'attention.' ) if "pre_norm_ffn.0." in k: lowerCamelCase_ = k_new.replace('pre_norm_ffn.0.' ,'layernorm_after.' ) if "pre_norm_ffn.1." in k: lowerCamelCase_ = k_new.replace('pre_norm_ffn.1.' ,'ffn.conv1.' ) if "pre_norm_ffn.3." in k: lowerCamelCase_ = k_new.replace('pre_norm_ffn.3.' ,'ffn.conv2.' ) if "classifier.1." in k: lowerCamelCase_ = k_new.replace('classifier.1.' ,'classifier.' ) if "seg_head." in k: lowerCamelCase_ = k_new.replace('seg_head.' ,'segmentation_head.' ) if ".aspp_layer." in k: lowerCamelCase_ = k_new.replace('.aspp_layer.' ,'.' ) if ".aspp_pool." in k: lowerCamelCase_ = k_new.replace('.aspp_pool.' ,'.' ) rename_keys.append((k, k_new) ) return rename_keys def _UpperCamelCase ( __UpperCamelCase ) -> Dict: lowerCamelCase_ = [] for k in state_dict.keys(): if k.startswith('seg_head.aux_head.' 
): keys_to_ignore.append(__UpperCamelCase ) for k in keys_to_ignore: state_dict.pop(__UpperCamelCase ,__UpperCamelCase ) def _UpperCamelCase ( ) -> Any: lowerCamelCase_ = 'http://images.cocodataset.org/val2017/000000039769.jpg' # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" lowerCamelCase_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw ) return im @torch.no_grad() def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[Any]: lowerCamelCase_ = get_mobilevitva_config(__UpperCamelCase ,__UpperCamelCase ) # load original state_dict lowerCamelCase_ = torch.load(__UpperCamelCase ,map_location='cpu' ) # load huggingface model if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ): lowerCamelCase_ = MobileViTVaForSemanticSegmentation(__UpperCamelCase ).eval() lowerCamelCase_ = False else: lowerCamelCase_ = MobileViTVaForImageClassification(__UpperCamelCase ).eval() lowerCamelCase_ = False # remove and rename some keys of the loaded original state_dict lowerCamelCase_ = checkpoint remove_unused_keys(__UpperCamelCase ) lowerCamelCase_ = create_rename_keys(__UpperCamelCase ,base_model=__UpperCamelCase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # load modified state_dict model.load_state_dict(__UpperCamelCase ) # Check outputs on an image, prepared by MobileViTImageProcessor lowerCamelCase_ = MobileViTImageProcessor(crop_size=config.image_size ,size=config.image_size + 32 ) lowerCamelCase_ = image_processor(images=prepare_img() ,return_tensors='pt' ) lowerCamelCase_ = model(**__UpperCamelCase ) # verify classification model if task_name.startswith('imagenet' ): lowerCamelCase_ = outputs.logits lowerCamelCase_ = logits.argmax(-1 ).item() print('Predicted class:' ,model.config.idalabel[predicted_class_idx] ) if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0: # expected_logits for base variant lowerCamelCase_ = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ) assert torch.allclose(logits[0, :3] ,__UpperCamelCase ,atol=1e-4 ) Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__UpperCamelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": A_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--task", default="imagenet1k_256", type=str, help=( "Name of the task for which the MobileViTV2 model you'd like to convert is trained on. " "\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n " ), choices=[ "imagenet1k_256", "imagenet1k_384", "imagenet21k_to_1k_256", "imagenet21k_to_1k_384", "ade20k_deeplabv3", "voc_deeplabv3", ], ) parser.add_argument( "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
) parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.") parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." ) A_ = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
384
1
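The subset-sum snippet in the row above is a classic dynamic program, but the obfuscation renames every assignment to `lowerCamelCase_`, so the table it builds is hidden and the code is not runnable as printed. A minimal de-obfuscated sketch of the same fill, with illustrative names (`is_sum_subset`, `subset`) that are assumptions rather than recovered bindings:

# Hedged sketch: readable restatement of the subset-sum DP above; names are illustrative.
def is_sum_subset(arr, required_sum):
    arr_len = len(arr)
    # subset[i][j] is True when some subset of the first i elements sums to j
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    for i in range(arr_len + 1):
        subset[i][0] = True  # the empty subset always sums to zero
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]  # element too large for this target: skip it
            else:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]

assert is_sum_subset([3, 34, 4, 12, 5, 2], 9) is True   # 4 + 5 == 9
assert is_sum_subset([3, 34, 4, 12, 5, 2], 30) is False  # no subset reaches 30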
import argparse import os from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_task_guides.py __lowercase = '''src/transformers''' __lowercase = '''docs/source/en/tasks''' def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' with open(SCREAMING_SNAKE_CASE , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __UpperCamelCase :Tuple = f.readlines() # Find the start prompt. __UpperCamelCase :List[str] = 0 while not lines[start_index].startswith(SCREAMING_SNAKE_CASE ): start_index += 1 start_index += 1 __UpperCamelCase :Dict = start_index while not lines[end_index].startswith(SCREAMING_SNAKE_CASE ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # This is to make sure the transformers module imported is the one in the repo. __lowercase = direct_transformers_import(TRANSFORMERS_PATH) __lowercase = { '''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES, '''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, '''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, '''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, '''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES, '''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, '''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, '''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, '''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, '''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, '''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, '''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, '''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, '''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, '''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, '''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, } # This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any # `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`). 
__lowercase = { '''summarization.md''': ('''nllb''',), '''translation.md''': ('''nllb''',), } def lowerCamelCase ( SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCamelCase :str = TASK_GUIDE_TO_MODELS[task_guide] __UpperCamelCase :Union[str, Any] = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(SCREAMING_SNAKE_CASE , set() ) __UpperCamelCase :int = { code: name for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if (code in model_maping_names or code in special_model_types) } return ", ".join([f"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n" def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ): '''simple docstring''' __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Union[str, Any] = _find_text_in_file( filename=os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , start_prompt='''<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->''' , end_prompt='''<!--End of the generated tip-->''' , ) __UpperCamelCase :int = get_model_list_for_task(SCREAMING_SNAKE_CASE ) if current_list != new_list: if overwrite: with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(lines[:start_index] + [new_list] + lines[end_index:] ) else: raise ValueError( f"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`""" ''' to fix this.''' ) if __name__ == "__main__": __lowercase = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') __lowercase = parser.parse_args() for task_guide in TASK_GUIDE_TO_MODELS.keys(): check_model_list_for_task(task_guide, args.fix_and_overwrite)
167
from __future__ import annotations import numpy as np def lowerCamelCase ( SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCamelCase , __UpperCamelCase :Optional[Any] = np.shape(SCREAMING_SNAKE_CASE ) if rows != columns: __UpperCamelCase :Dict = ( '''\'table\' has to be a square shaped array but got a ''' f"""{rows}x{columns} array:\n{table}""" ) raise ValueError(SCREAMING_SNAKE_CASE ) __UpperCamelCase :int = np.zeros((rows, columns) ) __UpperCamelCase :Tuple = np.zeros((rows, columns) ) for i in range(SCREAMING_SNAKE_CASE ): for j in range(SCREAMING_SNAKE_CASE ): __UpperCamelCase :Union[str, Any] = sum(lower[i][k] * upper[k][j] for k in range(SCREAMING_SNAKE_CASE ) ) if upper[j][j] == 0: raise ArithmeticError('''No LU decomposition exists''' ) __UpperCamelCase :Tuple = (table[i][j] - total) / upper[j][j] __UpperCamelCase :Optional[int] = 1 for j in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): __UpperCamelCase :List[str] = sum(lower[i][k] * upper[k][j] for k in range(SCREAMING_SNAKE_CASE ) ) __UpperCamelCase :Dict = table[i][j] - total return lower, upper if __name__ == "__main__": import doctest doctest.testmod()
167
1
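The LU-decomposition snippet in the row above collapses all distinct loop bounds into the same obfuscated placeholder (`SCREAMING_SNAKE_CASE`), so the control flow cannot be read off directly. A runnable Doolittle-style sketch of what it plausibly computes, with the function name and loop bounds assumed for illustration; `lower` gets a unit diagonal and `table == lower @ upper`:

from __future__ import annotations

import numpy as np

# Hedged sketch: Doolittle LU factorisation without pivoting (assumed bounds, illustrative names).
def lu_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    rows, columns = np.shape(table)
    if rows != columns:
        raise ValueError(f"'table' has to be a square array but got {rows}x{columns}")
    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):  # entries of L strictly below the diagonal
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1  # unit diagonal by convention
        for j in range(i, columns):  # entries of U on and above the diagonal
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper

A = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
L, U = lu_decomposition(A)
assert np.allclose(L @ U, A)

Note this variant has no pivoting, so it raises on matrices whose leading principal minors vanish even when a pivoted LU exists.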
import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class _snake_case ( unittest.TestCase ): '''simple docstring''' def A__ ( self: Dict ) -> int: UpperCAmelCase_ : Any = { """task_specific_params""": { """summarization""": {"""length_penalty""": 1.0, """max_length""": 128, """min_length""": 12, """num_beams""": 4}, """summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 142, """min_length""": 56, """num_beams""": 4}, """summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 62, """min_length""": 11, """num_beams""": 6}, } } UpperCAmelCase_ : Any = { """task_specific_params.summarization.length_penalty""": 1.0, """task_specific_params.summarization.max_length""": 128, """task_specific_params.summarization.min_length""": 12, """task_specific_params.summarization.num_beams""": 4, """task_specific_params.summarization_cnn.length_penalty""": 2.0, """task_specific_params.summarization_cnn.max_length""": 142, """task_specific_params.summarization_cnn.min_length""": 56, """task_specific_params.summarization_cnn.num_beams""": 4, """task_specific_params.summarization_xsum.length_penalty""": 1.0, """task_specific_params.summarization_xsum.max_length""": 62, """task_specific_params.summarization_xsum.min_length""": 11, """task_specific_params.summarization_xsum.num_beams""": 6, } self.assertEqual(flatten_dict(lowerCamelCase_ ) ,lowerCamelCase_ ) def A__ ( self: int ) -> Tuple: UpperCAmelCase_ : Dict = np.random.randn(3 ,4 ) self.assertTrue(np.allclose(transpose(lowerCamelCase_ ) ,x.transpose() ) ) UpperCAmelCase_ : Union[str, Any] = np.random.randn(3 ,4 ,5 ) self.assertTrue(np.allclose(transpose(lowerCamelCase_ ,axes=(1, 2, 0) ) ,x.transpose((1, 2, 0) ) ) ) @require_torch def A__ ( self: List[str] ) -> List[str]: UpperCAmelCase_ : Optional[int] = np.random.randn(3 ,4 ) UpperCAmelCase_ : Tuple = torch.tensor(lowerCamelCase_ ) self.assertTrue(np.allclose(transpose(lowerCamelCase_ ) ,transpose(lowerCamelCase_ ).numpy() ) ) UpperCAmelCase_ : Dict = np.random.randn(3 ,4 ,5 ) UpperCAmelCase_ : Any = torch.tensor(lowerCamelCase_ ) self.assertTrue(np.allclose(transpose(lowerCamelCase_ ,axes=(1, 2, 0) ) ,transpose(lowerCamelCase_ ,axes=(1, 2, 0) ).numpy() ) ) @require_tf def A__ ( self: Any ) -> List[Any]: UpperCAmelCase_ : Dict = np.random.randn(3 ,4 ) UpperCAmelCase_ : Optional[int] = tf.constant(lowerCamelCase_ ) self.assertTrue(np.allclose(transpose(lowerCamelCase_ ) ,transpose(lowerCamelCase_ ).numpy() ) ) UpperCAmelCase_ : Tuple = np.random.randn(3 ,4 ,5 ) UpperCAmelCase_ : int = tf.constant(lowerCamelCase_ ) self.assertTrue(np.allclose(transpose(lowerCamelCase_ ,axes=(1, 2, 0) ) ,transpose(lowerCamelCase_ ,axes=(1, 2, 0) ).numpy() ) ) @require_flax def A__ ( self: Any ) -> str: UpperCAmelCase_ : List[str] = np.random.randn(3 ,4 ) UpperCAmelCase_ : int = jnp.array(lowerCamelCase_ ) self.assertTrue(np.allclose(transpose(lowerCamelCase_ ) ,np.asarray(transpose(lowerCamelCase_ ) ) ) ) UpperCAmelCase_ : str = np.random.randn(3 ,4 ,5 ) UpperCAmelCase_ : Union[str, Any] = jnp.array(lowerCamelCase_ ) self.assertTrue(np.allclose(transpose(lowerCamelCase_ ,axes=(1, 2, 0) ) ,np.asarray(transpose(lowerCamelCase_ ,axes=(1, 2, 0) ) ) ) ) def A__ ( self: 
Optional[int] ) -> Dict: UpperCAmelCase_ : Tuple = np.random.randn(3 ,4 ) self.assertTrue(np.allclose(reshape(lowerCamelCase_ ,(4, 3) ) ,np.reshape(lowerCamelCase_ ,(4, 3) ) ) ) UpperCAmelCase_ : Optional[Any] = np.random.randn(3 ,4 ,5 ) self.assertTrue(np.allclose(reshape(lowerCamelCase_ ,(12, 5) ) ,np.reshape(lowerCamelCase_ ,(12, 5) ) ) ) @require_torch def A__ ( self: Any ) -> Dict: UpperCAmelCase_ : Tuple = np.random.randn(3 ,4 ) UpperCAmelCase_ : Any = torch.tensor(lowerCamelCase_ ) self.assertTrue(np.allclose(reshape(lowerCamelCase_ ,(4, 3) ) ,reshape(lowerCamelCase_ ,(4, 3) ).numpy() ) ) UpperCAmelCase_ : Optional[int] = np.random.randn(3 ,4 ,5 ) UpperCAmelCase_ : List[str] = torch.tensor(lowerCamelCase_ ) self.assertTrue(np.allclose(reshape(lowerCamelCase_ ,(12, 5) ) ,reshape(lowerCamelCase_ ,(12, 5) ).numpy() ) ) @require_tf def A__ ( self: Optional[Any] ) -> Optional[Any]: UpperCAmelCase_ : Tuple = np.random.randn(3 ,4 ) UpperCAmelCase_ : int = tf.constant(lowerCamelCase_ ) self.assertTrue(np.allclose(reshape(lowerCamelCase_ ,(4, 3) ) ,reshape(lowerCamelCase_ ,(4, 3) ).numpy() ) ) UpperCAmelCase_ : int = np.random.randn(3 ,4 ,5 ) UpperCAmelCase_ : List[Any] = tf.constant(lowerCamelCase_ ) self.assertTrue(np.allclose(reshape(lowerCamelCase_ ,(12, 5) ) ,reshape(lowerCamelCase_ ,(12, 5) ).numpy() ) ) @require_flax def A__ ( self: List[Any] ) -> Dict: UpperCAmelCase_ : str = np.random.randn(3 ,4 ) UpperCAmelCase_ : Union[str, Any] = jnp.array(lowerCamelCase_ ) self.assertTrue(np.allclose(reshape(lowerCamelCase_ ,(4, 3) ) ,np.asarray(reshape(lowerCamelCase_ ,(4, 3) ) ) ) ) UpperCAmelCase_ : List[str] = np.random.randn(3 ,4 ,5 ) UpperCAmelCase_ : Tuple = jnp.array(lowerCamelCase_ ) self.assertTrue(np.allclose(reshape(lowerCamelCase_ ,(12, 5) ) ,np.asarray(reshape(lowerCamelCase_ ,(12, 5) ) ) ) ) def A__ ( self: Optional[Any] ) -> Any: UpperCAmelCase_ : List[str] = np.random.randn(1 ,3 ,4 ) self.assertTrue(np.allclose(squeeze(lowerCamelCase_ ) ,np.squeeze(lowerCamelCase_ ) ) ) UpperCAmelCase_ : Optional[int] = np.random.randn(1 ,4 ,1 ,5 ) self.assertTrue(np.allclose(squeeze(lowerCamelCase_ ,axis=2 ) ,np.squeeze(lowerCamelCase_ ,axis=2 ) ) ) @require_torch def A__ ( self: str ) -> Any: UpperCAmelCase_ : Union[str, Any] = np.random.randn(1 ,3 ,4 ) UpperCAmelCase_ : Any = torch.tensor(lowerCamelCase_ ) self.assertTrue(np.allclose(squeeze(lowerCamelCase_ ) ,squeeze(lowerCamelCase_ ).numpy() ) ) UpperCAmelCase_ : Any = np.random.randn(1 ,4 ,1 ,5 ) UpperCAmelCase_ : Tuple = torch.tensor(lowerCamelCase_ ) self.assertTrue(np.allclose(squeeze(lowerCamelCase_ ,axis=2 ) ,squeeze(lowerCamelCase_ ,axis=2 ).numpy() ) ) @require_tf def A__ ( self: Any ) -> Tuple: UpperCAmelCase_ : Any = np.random.randn(1 ,3 ,4 ) UpperCAmelCase_ : Dict = tf.constant(lowerCamelCase_ ) self.assertTrue(np.allclose(squeeze(lowerCamelCase_ ) ,squeeze(lowerCamelCase_ ).numpy() ) ) UpperCAmelCase_ : Optional[Any] = np.random.randn(1 ,4 ,1 ,5 ) UpperCAmelCase_ : Dict = tf.constant(lowerCamelCase_ ) self.assertTrue(np.allclose(squeeze(lowerCamelCase_ ,axis=2 ) ,squeeze(lowerCamelCase_ ,axis=2 ).numpy() ) ) @require_flax def A__ ( self: Optional[int] ) -> str: UpperCAmelCase_ : Any = np.random.randn(1 ,3 ,4 ) UpperCAmelCase_ : Optional[int] = jnp.array(lowerCamelCase_ ) self.assertTrue(np.allclose(squeeze(lowerCamelCase_ ) ,np.asarray(squeeze(lowerCamelCase_ ) ) ) ) UpperCAmelCase_ : Optional[Any] = np.random.randn(1 ,4 ,1 ,5 ) UpperCAmelCase_ : Union[str, Any] = jnp.array(lowerCamelCase_ ) 
self.assertTrue(np.allclose(squeeze(lowerCamelCase_ ,axis=2 ) ,np.asarray(squeeze(lowerCamelCase_ ,axis=2 ) ) ) ) def A__ ( self: List[str] ) -> List[str]: UpperCAmelCase_ : Optional[Any] = np.random.randn(3 ,4 ) self.assertTrue(np.allclose(expand_dims(lowerCamelCase_ ,axis=1 ) ,np.expand_dims(lowerCamelCase_ ,axis=1 ) ) ) @require_torch def A__ ( self: str ) -> Optional[int]: UpperCAmelCase_ : Dict = np.random.randn(3 ,4 ) UpperCAmelCase_ : Dict = torch.tensor(lowerCamelCase_ ) self.assertTrue(np.allclose(expand_dims(lowerCamelCase_ ,axis=1 ) ,expand_dims(lowerCamelCase_ ,axis=1 ).numpy() ) ) @require_tf def A__ ( self: Union[str, Any] ) -> List[str]: UpperCAmelCase_ : Dict = np.random.randn(3 ,4 ) UpperCAmelCase_ : Optional[int] = tf.constant(lowerCamelCase_ ) self.assertTrue(np.allclose(expand_dims(lowerCamelCase_ ,axis=1 ) ,expand_dims(lowerCamelCase_ ,axis=1 ).numpy() ) ) @require_flax def A__ ( self: Optional[int] ) -> Optional[int]: UpperCAmelCase_ : Optional[Any] = np.random.randn(3 ,4 ) UpperCAmelCase_ : Optional[Any] = jnp.array(lowerCamelCase_ ) self.assertTrue(np.allclose(expand_dims(lowerCamelCase_ ,axis=1 ) ,np.asarray(expand_dims(lowerCamelCase_ ,axis=1 ) ) ) )
322
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} UpperCamelCase_ = { '''vocab_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } UpperCamelCase_ = { '''vocab_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } UpperCamelCase_ = { '''vocab_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json''' ), }, } UpperCamelCase_ = { '''facebook/dpr-ctx_encoder-single-nq-base''': 512, '''facebook/dpr-ctx_encoder-multiset-base''': 512, } UpperCamelCase_ = { '''facebook/dpr-question_encoder-single-nq-base''': 512, '''facebook/dpr-question_encoder-multiset-base''': 512, } UpperCamelCase_ = { '''facebook/dpr-reader-single-nq-base''': 512, '''facebook/dpr-reader-multiset-base''': 512, } UpperCamelCase_ = { '''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True}, } UpperCamelCase_ = { '''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True}, } UpperCamelCase_ = { '''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True}, } class _snake_case ( __snake_case ): '''simple docstring''' A__ : Dict = VOCAB_FILES_NAMES A__ : Dict = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP A__ : Dict = 
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ : Dict = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION A__ : Optional[Any] = DPRContextEncoderTokenizer class _snake_case ( __snake_case ): '''simple docstring''' A__ : Tuple = VOCAB_FILES_NAMES A__ : str = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP A__ : Optional[Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ : Dict = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION A__ : Optional[Any] = DPRQuestionEncoderTokenizer UpperCamelCase_ = collections.namedtuple( '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text'''] ) UpperCamelCase_ = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits''']) UpperCamelCase_ = R''' Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. 
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of a list of python integers. Acceptable values are: - `\'tf\'`: Return TensorFlow `tf.constant` objects. - `\'pt\'`: Return PyTorch `torch.Tensor` objects. - `\'np\'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer\'s default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. ''' @add_start_docstrings(__snake_case ) class _snake_case : '''simple docstring''' def __call__( self: str ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Optional[str] = None ,lowerCamelCase_: Optional[str] = None ,lowerCamelCase_: Union[bool, str] = False ,lowerCamelCase_: Union[bool, str] = False ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: Optional[Union[str, TensorType]] = None ,lowerCamelCase_: Optional[bool] = None ,**lowerCamelCase_: Optional[Any] ,) -> BatchEncoding: if titles is None and texts is None: return super().__call__( lowerCamelCase_ ,padding=lowerCamelCase_ ,truncation=lowerCamelCase_ ,max_length=lowerCamelCase_ ,return_tensors=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,**lowerCamelCase_ ,) elif titles is None or texts is None: UpperCAmelCase_ : Tuple = titles if texts is None else texts return super().__call__( lowerCamelCase_ ,lowerCamelCase_ ,padding=lowerCamelCase_ ,truncation=lowerCamelCase_ ,max_length=lowerCamelCase_ ,return_tensors=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,**lowerCamelCase_ ,) UpperCAmelCase_ : Any = titles if not isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else [titles] UpperCAmelCase_ : Tuple = texts if not isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else [texts] UpperCAmelCase_ : Optional[Any] = len(lowerCamelCase_ ) UpperCAmelCase_ : int = questions if not isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else [questions] * n_passages assert len(lowerCamelCase_ ) == len( lowerCamelCase_ ), F'''There should be as many titles as texts but got {len(lowerCamelCase_ )} titles and {len(lowerCamelCase_ )} texts.''' UpperCAmelCase_ : int = super().__call__(lowerCamelCase_ ,lowerCamelCase_ ,padding=lowerCamelCase_ ,truncation=lowerCamelCase_ )["""input_ids"""] UpperCAmelCase_ : str = super().__call__(lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ ,padding=lowerCamelCase_ ,truncation=lowerCamelCase_ )["""input_ids"""] UpperCAmelCase_ : Any = { """input_ids""": [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in
zip(lowerCamelCase_ ,lowerCamelCase_ ) ] } if return_attention_mask is not False: UpperCAmelCase_ : Dict = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) UpperCAmelCase_ : List[str] = attention_mask return self.pad(lowerCamelCase_ ,padding=lowerCamelCase_ ,max_length=lowerCamelCase_ ,return_tensors=lowerCamelCase_ ) def A__ ( self: int ,lowerCamelCase_: BatchEncoding ,lowerCamelCase_: DPRReaderOutput ,lowerCamelCase_: int = 16 ,lowerCamelCase_: int = 64 ,lowerCamelCase_: int = 4 ,) -> List[DPRSpanPrediction]: UpperCAmelCase_ : Optional[int] = reader_input["""input_ids"""] UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = reader_output[:3] UpperCAmelCase_ : Optional[Any] = len(lowerCamelCase_ ) UpperCAmelCase_ : Optional[int] = sorted(range(lowerCamelCase_ ) ,reverse=lowerCamelCase_ ,key=relevance_logits.__getitem__ ) UpperCAmelCase_ : List[DPRReaderOutput] = [] for doc_id in sorted_docs: UpperCAmelCase_ : List[str] = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence UpperCAmelCase_ : str = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: UpperCAmelCase_ : List[Any] = sequence_ids.index(self.pad_token_id ) else: UpperCAmelCase_ : Optional[int] = len(lowerCamelCase_ ) UpperCAmelCase_ : Dict = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=lowerCamelCase_ ,top_spans=lowerCamelCase_ ,) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=lowerCamelCase_ ,start_index=lowerCamelCase_ ,end_index=lowerCamelCase_ ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) ) if len(lowerCamelCase_ ) >= num_spans: break return nbest_spans_predictions[:num_spans] def A__ ( self: Any ,lowerCamelCase_: List[int] ,lowerCamelCase_: List[int] ,lowerCamelCase_: int ,lowerCamelCase_: int ,) -> List[DPRSpanPrediction]: UpperCAmelCase_ : Union[str, Any] = [] for start_index, start_score in enumerate(lowerCamelCase_ ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) UpperCAmelCase_ : Optional[int] = sorted(lowerCamelCase_ ,key=lambda lowerCamelCase_ : x[1] ,reverse=lowerCamelCase_ ) UpperCAmelCase_ : Optional[Any] = [] for (start_index, end_index), score in scores: assert start_index <= end_index, F'''Wrong span indices: [{start_index}:{end_index}]''' UpperCAmelCase_ : Any = end_index - start_index + 1 assert length <= max_answer_length, F'''Span is too long: {length} > {max_answer_length}''' if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(lowerCamelCase_ ) == top_spans: break return chosen_span_intervals @add_end_docstrings(__snake_case ) class _snake_case ( __snake_case , __snake_case ): '''simple docstring''' A__ : int = VOCAB_FILES_NAMES A__ : Tuple = READER_PRETRAINED_VOCAB_FILES_MAP A__ : List[Any] = 
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ : List[str] = READER_PRETRAINED_INIT_CONFIGURATION A__ : int = ["input_ids", "attention_mask"] A__ : str = DPRReaderTokenizer
322
1
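The row above tests framework-agnostic tensor helpers (`transpose`, `reshape`, `squeeze`, `expand_dims`) against NumPy, PyTorch, TensorFlow, and JAX. A hedged sketch of the dispatch pattern such a helper could use; this illustrates the idea and is not the actual `transformers.utils` implementation (the module-name check is a heuristic chosen here):

import numpy as np

# Hedged sketch: dispatch a generic transpose on the tensor's type.
# Only the NumPy branch runs below; the other branches assume the
# corresponding library is installed and use its documented API.
def transpose(array, axes=None):
    if isinstance(array, np.ndarray):
        return np.transpose(array, axes=axes)
    module = type(array).__module__
    if module.startswith("torch"):
        dims = axes if axes is not None else tuple(reversed(range(array.dim())))
        return array.permute(*dims)
    if module.startswith("tensorflow"):
        import tensorflow as tf
        return tf.transpose(array, perm=axes)
    if module.startswith("jax"):  # jax arrays report a "jaxlib..." module name
        import jax.numpy as jnp
        return jnp.transpose(array, axes=axes)
    raise ValueError(f"Type not supported for transpose: {type(array)}.")

x = np.random.randn(3, 4, 5)
assert transpose(x, axes=(1, 2, 0)).shape == (4, 5, 3)
assert transpose(x).shape == (5, 4, 3)  # axes=None reverses the dimensions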
"""simple docstring""" from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class UpperCamelCase_ : def __init__( self : Union[str, Any] , lowerCAmelCase_ : List[Any] , ) -> Any: UpperCAmelCase_ : Optional[int] = parent UpperCAmelCase_ : str = 13 UpperCAmelCase_ : Dict = 7 UpperCAmelCase_ : Dict = True UpperCAmelCase_ : Dict = True UpperCAmelCase_ : Optional[Any] = True UpperCAmelCase_ : Optional[Any] = 99 UpperCAmelCase_ : Optional[int] = 32 UpperCAmelCase_ : Optional[int] = 2 UpperCAmelCase_ : Optional[Any] = 4 UpperCAmelCase_ : int = 37 UpperCAmelCase_ : int = "gelu" UpperCAmelCase_ : Any = 0.1 UpperCAmelCase_ : Any = 0.1 UpperCAmelCase_ : List[Any] = 512 UpperCAmelCase_ : List[str] = 16 UpperCAmelCase_ : str = 2 UpperCAmelCase_ : List[str] = 0.0_2 UpperCAmelCase_ : int = 3 UpperCAmelCase_ : List[str] = 4 UpperCAmelCase_ : Optional[Any] = None def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: UpperCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ : Any = None if self.use_input_mask: UpperCAmelCase_ : Any = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase_ : str = None UpperCAmelCase_ : List[Any] = None UpperCAmelCase_ : str = None if self.use_labels: UpperCAmelCase_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ : int = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase_ : Any = EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : List[str] = self.prepare_config_and_inputs() UpperCAmelCase_ : List[str] = True UpperCAmelCase_ : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict ) -> Tuple: UpperCAmelCase_ : Tuple = 
TFEsmModel(config=lowerCAmelCase_ ) UpperCAmelCase_ : Any = {"input_ids": input_ids, "attention_mask": input_mask} UpperCAmelCase_ : Any = model(lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = [input_ids, input_mask] UpperCAmelCase_ : Dict = model(lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = model(lowerCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] , ) -> Union[str, Any]: UpperCAmelCase_ : Optional[Any] = True UpperCAmelCase_ : Optional[int] = TFEsmModel(config=lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = { "input_ids": input_ids, "attention_mask": input_mask, "encoder_hidden_states": encoder_hidden_states, "encoder_attention_mask": encoder_attention_mask, } UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = [input_ids, input_mask] UpperCAmelCase_ : Optional[int] = model(lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ ) # Also check the case where encoder outputs are not passed UpperCAmelCase_ : List[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int ) -> Dict: UpperCAmelCase_ : Dict = TFEsmForMaskedLM(config=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = model([input_ids, input_mask] ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] ) -> Union[str, Any]: UpperCAmelCase_ : Any = self.num_labels UpperCAmelCase_ : List[Any] = TFEsmForTokenClassification(config=lowerCAmelCase_ ) UpperCAmelCase_ : str = {"input_ids": input_ids, "attention_mask": input_mask} UpperCAmelCase_ : int = model(lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict: UpperCAmelCase_ : int = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Optional[int] = config_and_inputs UpperCAmelCase_ : List[str] = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class UpperCamelCase_ (__A , __A , unittest.TestCase ): __magic_name__ = ( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) __magic_name__ = ( { '''feature-extraction''': TFEsmModel, '''fill-mask''': TFEsmForMaskedLM, '''text-classification''': TFEsmForSequenceClassification, '''token-classification''': TFEsmForTokenClassification, '''zero-shot''': TFEsmForSequenceClassification, } if is_tf_available() else {} ) __magic_name__ = False __magic_name__ = False def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> 
Optional[int]: UpperCAmelCase_ : Any = TFEsmModelTester(self ) UpperCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]: UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]: UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Optional[int] = TFEsmModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) @unittest.skip("Protein models do not support embedding resizing." ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]: pass @unittest.skip("Protein models do not support embedding resizing." ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: pass def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int: UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Any = model_class(lowerCAmelCase_ ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer UpperCAmelCase_ : Tuple = model.get_bias() assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for k, v in name.items(): assert isinstance(lowerCAmelCase_ , tf.Variable ) else: UpperCAmelCase_ : Optional[Any] = model.get_output_embeddings() assert x is None UpperCAmelCase_ : List[Any] = model.get_bias() assert name is None @require_tf class UpperCamelCase_ (unittest.TestCase ): @slow def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]: UpperCAmelCase_ : Any = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" ) UpperCAmelCase_ : int = tf.constant([[0, 1, 2, 3, 4, 5]] ) UpperCAmelCase_ : int = model(lowerCAmelCase_ )[0] UpperCAmelCase_ : Optional[Any] = [1, 6, 33] self.assertEqual(list(output.numpy().shape ) , lowerCAmelCase_ ) # compare the actual values for a slice. UpperCAmelCase_ : int = tf.constant( [ [ [8.9_2_1_5_1_8, -1_0.5_8_9_8_1_4, -6.4_6_7_1_3_0_7], [-6.3_9_6_7_1_5_6, -1_3.9_1_1_3_7_7, -1.1_2_1_1_9_1_5], [-7.7_8_1_2_4_7, -1_3.9_5_1_5_5_7, -3.7_4_0_5_9_2], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) ) @slow def _SCREAMING_SNAKE_CASE ( self : int ) -> int: UpperCAmelCase_ : Tuple = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" ) UpperCAmelCase_ : List[Any] = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) UpperCAmelCase_ : Optional[int] = model(lowerCAmelCase_ )[0] # compare the actual values for a slice. 
UpperCAmelCase_ : Optional[Any] = tf.constant( [ [ [0.1_4_4_4_3_0_9_2, 0.5_4_1_2_5_3_2_7, 0.3_2_4_7_7_3_9], [0.3_0_3_4_0_4_8_4, 0.0_0_5_2_6_6_7_6, 0.3_1_0_7_7_7_2_2], [0.3_2_2_7_8_0_4_3, -0.2_4_9_8_7_0_9_6, 0.3_4_1_4_6_2_8], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
95
'''simple docstring''' import math from collections.abc import Iterator from itertools import takewhile def _lowerCAmelCase ( lowerCamelCase_ : int ): if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All prime numbers greater than 3 are of the form 6k +/- 1 for i in range(5 , int(math.sqrt(lowerCamelCase_ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def _lowerCAmelCase ( ): __lowercase = 2 while True: if is_prime(lowerCamelCase_ ): yield num num += 1 def _lowerCAmelCase ( lowerCamelCase_ : int = 2_0_0_0_0_0_0 ): return sum(takewhile(lambda lowerCamelCase_ : x < n , prime_generator() ) ) if __name__ == "__main__": print(f'''{solution() = }''')
502
0
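The prime-summation snippet in the row above relies on the fact that every prime greater than 3 has the form 6k +/- 1, so trial division only needs to check 2, 3, and candidates of that form up to the square root. A small runnable sketch with de-obfuscated, illustrative names:

import math
from itertools import islice, takewhile

# Hedged sketch: 6k +/- 1 trial-division primality, as in the snippet above.
def is_prime(number: int) -> bool:
    if 1 < number < 4:  # 2 and 3 are prime
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5, int(math.sqrt(number) + 1), 6):  # tests 6k - 1 and 6k + 1
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1

assert list(islice(prime_generator(), 6)) == [2, 3, 5, 7, 11, 13]
assert sum(takewhile(lambda x: x < 10, prime_generator())) == 17  # 2 + 3 + 5 + 7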
def __a ( A__ : list ): if not grid or not grid[0]: raise TypeError("The grid does not contain the appropriate information" ) for cell_n in range(1 , len(grid[0] ) ): grid[0][cell_n] += grid[0][cell_n - 1] SCREAMING_SNAKE_CASE = grid[0] for row_n in range(1 , len(A__ ) ): SCREAMING_SNAKE_CASE = grid[row_n] SCREAMING_SNAKE_CASE = fill_row(A__ , A__ ) SCREAMING_SNAKE_CASE = grid[row_n] return grid[-1][-1] def __a ( A__ : list , A__ : list ): current_row[0] += row_above[0] for cell_n in range(1 , len(A__ ) ): current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] ) return current_row if __name__ == "__main__": import doctest doctest.testmod()
698
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = tempfile.mkdtemp() SCREAMING_SNAKE_CASE = BlipImageProcessor() SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" ) SCREAMING_SNAKE_CASE = BlipProcessor(__lowerCamelCase , __lowerCamelCase ) processor.save_pretrained(self.tmpdirname ) def _snake_case ( self : Dict , **__lowerCamelCase : Any ): return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).tokenizer def _snake_case ( self : List[Any] , **__lowerCamelCase : Optional[Any] ): return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).image_processor def _snake_case ( self : Union[str, Any] ): shutil.rmtree(self.tmpdirname ) def _snake_case ( self : Tuple ): SCREAMING_SNAKE_CASE = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _snake_case ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 ) SCREAMING_SNAKE_CASE = BlipProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__lowerCamelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCamelCase ) def _snake_case ( self : Optional[int] ): SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE = self.prepare_image_inputs() SCREAMING_SNAKE_CASE = image_processor(__lowerCamelCase , return_tensors="np" ) SCREAMING_SNAKE_CASE = processor(images=__lowerCamelCase , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE = "lower newer" SCREAMING_SNAKE_CASE = processor(text=__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer(__lowerCamelCase , return_token_type_ids=__lowerCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _snake_case ( self : str ): SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = 
BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE = "lower newer" SCREAMING_SNAKE_CASE = self.prepare_image_inputs() SCREAMING_SNAKE_CASE = processor(text=__lowerCamelCase , images=__lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] ) # test if it raises when no input is passed with pytest.raises(__lowerCamelCase ): processor() def _snake_case ( self : Any ): SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] SCREAMING_SNAKE_CASE = processor.batch_decode(__lowerCamelCase ) SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) def _snake_case ( self : Dict ): SCREAMING_SNAKE_CASE = self.get_image_processor() SCREAMING_SNAKE_CASE = self.get_tokenizer() SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase ) SCREAMING_SNAKE_CASE = "lower newer" SCREAMING_SNAKE_CASE = self.prepare_image_inputs() SCREAMING_SNAKE_CASE = processor(text=__lowerCamelCase , images=__lowerCamelCase ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
698
1
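The minimum-path-sum snippet in the row above fills the grid in place: each cell accumulates the cheapest cost of reaching it when only right and down moves are allowed. A de-obfuscated usage sketch (names are illustrative; note the input grid is mutated):

# Hedged sketch: in-place minimum-path-sum DP, restated with readable names.
def fill_row(current_row, row_above):
    current_row[0] += row_above[0]  # leftmost cell can only be reached from above
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row

def min_path_sum(grid):
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]  # top row: only moves from the left
    for row_n in range(1, len(grid)):
        grid[row_n] = fill_row(grid[row_n], grid[row_n - 1])
    return grid[-1][-1]

# Classic example: the path 1 -> 3 -> 1 -> 1 -> 1 costs 7.
assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7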
"""simple docstring""" from __future__ import annotations class lowercase__ : """simple docstring""" def __init__( self , _A=None ): '''simple docstring''' UpperCamelCase : Optional[Any] = data UpperCamelCase : Optional[Any] = None def __repr__( self ): '''simple docstring''' UpperCamelCase : Dict = [] UpperCamelCase : Dict = self while temp: string_rep.append(f"""{temp.data}""" ) UpperCamelCase : Union[str, Any] = temp.next return "->".join(_A ) def UpperCamelCase (SCREAMING_SNAKE_CASE ): if not elements_list: raise Exception("""The Elements List is empty""" ) UpperCamelCase : Tuple = Node(elements_list[0] ) for i in range(1 , len(SCREAMING_SNAKE_CASE ) ): UpperCamelCase : Optional[Any] = Node(elements_list[i] ) UpperCamelCase : Optional[int] = current.next return head def UpperCamelCase (SCREAMING_SNAKE_CASE ): if head_node is not None and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): print_reverse(head_node.next ) print(head_node.data ) def UpperCamelCase (): from doctest import testmod testmod() UpperCamelCase : Dict = make_linked_list([14, 52, 14, 12, 43] ) print("""Linked List:""" ) print(SCREAMING_SNAKE_CASE ) print("""Elements in Reverse:""" ) print_reverse(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
102
"""simple docstring""" import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): UpperCamelCase : Tuple = tmp_path / """cache""" UpperCamelCase : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): UpperCamelCase : Tuple = JsonDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , keep_in_memory=SCREAMING_SNAKE_CASE ).read() _check_json_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): UpperCamelCase : Optional[Any] = tmp_path / """cache""" UpperCamelCase : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} UpperCamelCase : Union[str, Any] = features.copy() if features else default_expected_features UpperCamelCase : str = ( Features({feature: Value(SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCamelCase : Optional[int] = JsonDatasetReader(SCREAMING_SNAKE_CASE , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read() _check_json_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( """features""" , [ None, {"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""}, ] , ) def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): UpperCamelCase : int = tmp_path / """cache""" UpperCamelCase : Optional[int] = {"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""} UpperCamelCase : Optional[Any] = features.copy() if features else default_expected_features UpperCamelCase : Tuple = ( Features({feature: Value(SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCamelCase : int = JsonDatasetReader(SCREAMING_SNAKE_CASE , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read() assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): # jsonl_312_path features are 
{"col_3": "float64", "col_1": "string", "col_2": "int64"} UpperCamelCase : str = {"""col_2""": """int64""", """col_3""": """float64""", """col_1""": """string"""} UpperCamelCase : Tuple = features.copy() UpperCamelCase : Tuple = ( Features({feature: Value(SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCamelCase : Optional[int] = tmp_path / """cache""" UpperCamelCase : List[Any] = JsonDatasetReader(SCREAMING_SNAKE_CASE , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read() assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): UpperCamelCase : Optional[int] = tmp_path / """cache""" UpperCamelCase : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} UpperCamelCase : Dict = JsonDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , split=SCREAMING_SNAKE_CASE ).read() _check_json_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): if issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): UpperCamelCase : List[str] = jsonl_path elif issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): UpperCamelCase : List[str] = [jsonl_path] UpperCamelCase : List[Any] = tmp_path / """cache""" UpperCamelCase : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} UpperCamelCase : Optional[int] = JsonDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read() _check_json_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=("train",) ): assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for split in splits: UpperCamelCase : Optional[Any] = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): UpperCamelCase : int = tmp_path / """cache""" UpperCamelCase : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): UpperCamelCase : Any = JsonDatasetReader({"""train""": jsonl_path} , cache_dir=SCREAMING_SNAKE_CASE , keep_in_memory=SCREAMING_SNAKE_CASE ).read() _check_json_datasetdict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, 
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): UpperCamelCase : List[str] = tmp_path / """cache""" UpperCamelCase : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} UpperCamelCase : Optional[Any] = features.copy() if features else default_expected_features UpperCamelCase : Optional[Any] = ( Features({feature: Value(SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCamelCase : Optional[Any] = JsonDatasetReader({"""train""": jsonl_path} , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read() _check_json_datasetdict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): if split: UpperCamelCase : List[Any] = {split: jsonl_path} else: UpperCamelCase : Tuple = """train""" UpperCamelCase : Union[str, Any] = {"""train""": jsonl_path, """test""": jsonl_path} UpperCamelCase : Optional[Any] = tmp_path / """cache""" UpperCamelCase : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} UpperCamelCase : Union[str, Any] = JsonDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read() _check_json_datasetdict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def UpperCamelCase (SCREAMING_SNAKE_CASE ): return json.load(SCREAMING_SNAKE_CASE ) def UpperCamelCase (SCREAMING_SNAKE_CASE ): return [json.loads(SCREAMING_SNAKE_CASE ) for line in buffer] class lowercase__ : """simple docstring""" @pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] ) def _a ( self , _A , _A , _A ): '''simple docstring''' with io.BytesIO() as buffer: JsonDatasetWriter(_A , _A , lines=_A ).write() buffer.seek(0 ) UpperCamelCase : Optional[Any] = load_json_function(_A ) assert isinstance(_A , _A ) assert isinstance(exported_content[0] , _A ) assert len(_A ) == 1_0 @pytest.mark.parametrize( """orient, container, keys, len_at""" , [ ("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None), ("""split""", dict, {"""columns""", """data"""}, """data"""), ("""index""", dict, set("""0123456789""" ), None), ("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""), ("""values""", list, None, None), ("""table""", dict, {"""schema""", """data"""}, """data"""), ] , ) def _a ( self , _A , _A , _A , _A , _A ): '''simple docstring''' with io.BytesIO() as buffer: JsonDatasetWriter(_A , _A , lines=_A , orient=_A ).write() buffer.seek(0 ) UpperCamelCase : str = load_json(_A ) assert isinstance(_A , _A ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(_A , """keys""" ) and not hasattr(exported_content[0] , """keys""" ) if len_at: assert len(exported_content[len_at] ) == 1_0 else: assert len(_A ) == 1_0 @pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] ) def _a ( self , _A , _A , _A ): '''simple docstring''' with io.BytesIO() as buffer: JsonDatasetWriter(_A , _A , lines=_A , num_proc=2 ).write() buffer.seek(0 ) 
UpperCamelCase : List[Any] = load_json_function(_A ) assert isinstance(_A , _A ) assert isinstance(exported_content[0] , _A ) assert len(_A ) == 1_0 @pytest.mark.parametrize( """orient, container, keys, len_at""" , [ ("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None), ("""split""", dict, {"""columns""", """data"""}, """data"""), ("""index""", dict, set("""0123456789""" ), None), ("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""), ("""values""", list, None, None), ("""table""", dict, {"""schema""", """data"""}, """data"""), ] , ) def _a ( self , _A , _A , _A , _A , _A ): '''simple docstring''' with io.BytesIO() as buffer: JsonDatasetWriter(_A , _A , lines=_A , orient=_A , num_proc=2 ).write() buffer.seek(0 ) UpperCamelCase : Dict = load_json(_A ) assert isinstance(_A , _A ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(_A , """keys""" ) and not hasattr(exported_content[0] , """keys""" ) if len_at: assert len(exported_content[len_at] ) == 1_0 else: assert len(_A ) == 1_0 def _a ( self , _A ): '''simple docstring''' with pytest.raises(_A ): with io.BytesIO() as buffer: JsonDatasetWriter(_A , _A , num_proc=0 ) @pytest.mark.parametrize("""compression, extension""" , [("""gzip""", """gz"""), ("""bz2""", """bz2"""), ("""xz""", """xz""")] ) def _a ( self , _A , _A , _A , _A , _A ): '''simple docstring''' UpperCamelCase : List[str] = tmp_path_factory.mktemp("""data""" ) / f"""test.json.{extension}""" UpperCamelCase : Tuple = str(shared_datadir / f"""test_file.json.{extension}""" ) JsonDatasetWriter(_A , _A , compression=_A ).write() with fsspec.open(_A , """rb""" , compression="""infer""" ) as f: UpperCamelCase : Dict = f.read() with fsspec.open(_A , """rb""" , compression="""infer""" ) as f: UpperCamelCase : Tuple = f.read() assert exported_content == original_content
102
1
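For orientation, the tests above exercise the internal JsonDatasetReader/JsonDatasetWriter classes; below is a minimal sketch of the equivalent round trip through the public `datasets` API. The `data.jsonl` path is hypothetical; the column names mirror the fixtures used in the tests.

from datasets import Dataset, Features, Value

# Hypothetical JSON Lines file with the same three columns used in the tests above.
features = Features({"col_1": Value("string"), "col_2": Value("int64"), "col_3": Value("float64")})
ds = Dataset.from_json("data.jsonl", features=features)  # wraps JsonDatasetReader internally

# Write it back out as JSON Lines, mirroring JsonDatasetWriter(..., lines=True).
ds.to_json("round_trip.jsonl", lines=True)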
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging A__ = logging.get_logger(__name__) A__ = { '''BAAI/AltCLIP''': '''https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json''', # See all AltCLIP models at https://huggingface.co/models?filter=altclip } class a ( __lowerCamelCase ): __lowerCAmelCase : str = """altclip_text_model""" def __init__( self :int ,__lowercase :List[Any]=2_5_0_0_0_2 ,__lowercase :List[Any]=1_0_2_4 ,__lowercase :str=2_4 ,__lowercase :Optional[Any]=1_6 ,__lowercase :str=4_0_9_6 ,__lowercase :str="gelu" ,__lowercase :List[Any]=0.1 ,__lowercase :Dict=0.1 ,__lowercase :Any=5_1_4 ,__lowercase :Optional[Any]=1 ,__lowercase :Optional[int]=0.02 ,__lowercase :Tuple=0.02 ,__lowercase :str=1e-0_5 ,__lowercase :Optional[Any]=1 ,__lowercase :List[str]=0 ,__lowercase :Dict=2 ,__lowercase :int="absolute" ,__lowercase :Dict=True ,__lowercase :Union[str, Any]=7_6_8 ,**__lowercase :Optional[int] ,): super().__init__(pad_token_id=__lowercase ,bos_token_id=__lowercase ,eos_token_id=__lowercase ,**__lowercase ) snake_case__ : Tuple = vocab_size snake_case__ : Optional[Any] = hidden_size snake_case__ : Tuple = num_hidden_layers snake_case__ : Tuple = num_attention_heads snake_case__ : int = hidden_act snake_case__ : Dict = intermediate_size snake_case__ : Union[str, Any] = hidden_dropout_prob snake_case__ : int = attention_probs_dropout_prob snake_case__ : Any = max_position_embeddings snake_case__ : int = type_vocab_size snake_case__ : str = initializer_range snake_case__ : Optional[Any] = initializer_factor snake_case__ : Any = layer_norm_eps snake_case__ : List[Any] = position_embedding_type snake_case__ : Optional[Any] = use_cache snake_case__ : int = project_dim class a ( __lowerCamelCase ): __lowerCAmelCase : Optional[int] = """altclip_vision_model""" def __init__( self :List[Any] ,__lowercase :str=7_6_8 ,__lowercase :Optional[int]=3_0_7_2 ,__lowercase :Union[str, Any]=5_1_2 ,__lowercase :List[Any]=1_2 ,__lowercase :List[str]=1_2 ,__lowercase :Union[str, Any]=3 ,__lowercase :Optional[Any]=2_2_4 ,__lowercase :Tuple=3_2 ,__lowercase :List[str]="quick_gelu" ,__lowercase :str=1e-5 ,__lowercase :Optional[Any]=0.0 ,__lowercase :Dict=0.02 ,__lowercase :Optional[int]=1.0 ,**__lowercase :List[str] ,): super().__init__(**__lowercase ) snake_case__ : Tuple = hidden_size snake_case__ : Union[str, Any] = intermediate_size snake_case__ : Optional[int] = projection_dim snake_case__ : Tuple = num_hidden_layers snake_case__ : str = num_attention_heads snake_case__ : Any = num_channels snake_case__ : Optional[int] = patch_size snake_case__ : Tuple = image_size snake_case__ : Optional[int] = initializer_range snake_case__ : List[Any] = initializer_factor snake_case__ : List[Any] = attention_dropout snake_case__ : Optional[Any] = layer_norm_eps snake_case__ : Any = hidden_act @classmethod def __lowerCamelCase ( cls :List[str] ,__lowercase :Union[str, os.PathLike] ,**__lowercase :List[str] ): cls._set_token_in_kwargs(__lowercase ) snake_case__ , snake_case__ : List[str] = cls.get_config_dict(__lowercase ,**__lowercase ) # get the vision config dict if we are loading from AltCLIPConfig if config_dict.get('''model_type''' ) == "altclip": snake_case__ : Union[str, Any] = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls ,'''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type 
""" F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__lowercase ,**__lowercase ) class a ( __lowerCamelCase ): __lowerCAmelCase : Union[str, Any] = """altclip""" __lowerCAmelCase : List[str] = True def __init__( self :Union[str, Any] ,__lowercase :Dict=None ,__lowercase :List[Any]=None ,__lowercase :Union[str, Any]=7_6_8 ,__lowercase :List[str]=2.6592 ,**__lowercase :str ): # If `_config_dict` exist, we use them for the backward compatibility. # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot # of confusion!). snake_case__ : Tuple = kwargs.pop('''text_config_dict''' ,__lowercase ) snake_case__ : str = kwargs.pop('''vision_config_dict''' ,__lowercase ) super().__init__(**__lowercase ) # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. if text_config_dict is not None: if text_config is None: snake_case__ : str = {} # This is the complete result when using `text_config_dict`. snake_case__ : Dict = AltCLIPTextConfig(**__lowercase ).to_dict() # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different. for key, value in _text_config_dict.items(): if key in text_config and value != text_config[key] and key not in ["transformers_version"]: # If specified in `text_config_dict` if key in text_config_dict: snake_case__ : Any = ( F"""`{key}` is found in both `text_config_dict` and `text_config` but with different values. """ F"""The value `text_config_dict[\"{key}\"]` will be used instead.""" ) # If inferred from default argument values (just to be super careful) else: snake_case__ : Dict = ( F"""`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The """ F"""value `text_config[\"{key}\"]` will be overriden.""" ) logger.warning(__lowercase ) # Update all values in `text_config` with the ones in `_text_config_dict`. text_config.update(_text_config_dict ) if vision_config_dict is not None: if vision_config is None: snake_case__ : List[str] = {} # This is the complete result when using `vision_config_dict`. snake_case__ : List[Any] = AltCLIPVisionConfig(**__lowercase ).to_dict() # convert keys to string instead of integer if "id2label" in _vision_config_dict: snake_case__ : int = { str(__lowercase ): value for key, value in _vision_config_dict['''id2label'''].items() } # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different. for key, value in _vision_config_dict.items(): if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]: # If specified in `vision_config_dict` if key in vision_config_dict: snake_case__ : List[str] = ( F"""`{key}` is found in both `vision_config_dict` and `vision_config` but with different """ F"""values. The value `vision_config_dict[\"{key}\"]` will be used instead.""" ) # If inferred from default argument values (just to be super careful) else: snake_case__ : List[str] = ( F"""`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. 
""" F"""The value `vision_config[\"{key}\"]` will be overriden.""" ) logger.warning(__lowercase ) # Update all values in `vision_config` with the ones in `_vision_config_dict`. vision_config.update(_vision_config_dict ) if text_config is None: snake_case__ : Tuple = {} logger.info('''`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.''' ) if vision_config is None: snake_case__ : Optional[int] = {} logger.info('''`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.''' ) snake_case__ : int = AltCLIPTextConfig(**__lowercase ) snake_case__ : Dict = AltCLIPVisionConfig(**__lowercase ) snake_case__ : Dict = projection_dim snake_case__ : Any = logit_scale_init_value snake_case__ : Tuple = 1.0 @classmethod def __lowerCamelCase ( cls :Optional[int] ,__lowercase :AltCLIPTextConfig ,__lowercase :AltCLIPVisionConfig ,**__lowercase :Any ): return cls(text_config=text_config.to_dict() ,vision_config=vision_config.to_dict() ,**__lowercase ) def __lowerCamelCase ( self :List[Any] ): snake_case__ : Tuple = copy.deepcopy(self.__dict__ ) snake_case__ : List[str] = self.text_config.to_dict() snake_case__ : Tuple = self.vision_config.to_dict() snake_case__ : int = self.__class__.model_type return output
219
from __future__ import annotations


def pigeon_sort(array: list[int]) -> list[int]:
    """Sort a list of integers in place using pigeonhole sort and return it."""
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
219
1
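A minimal sketch of composing the AltCLIP configuration classes defined above through the combining classmethod (default values throughout; assumes a transformers version that ships AltCLIP):

from transformers import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig

text_config = AltCLIPTextConfig()      # XLM-R-style text tower defaults
vision_config = AltCLIPVisionConfig()  # ViT-style vision tower defaults

# Equivalent to the from-text-and-vision classmethod defined on the composite config above.
config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config)
print(config.projection_dim)  # 768 by default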
def get_highest_set_bit_position(number: int) -> int:
    """Return the 1-based position of the highest set bit of a non-negative integer.

    >>> get_highest_set_bit_position(25)
    5
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:  # assumes number >= 0; a negative int would loop forever under >>
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
28
import collections import importlib.util import os import re from pathlib import Path _lowercase : List[Any] ='''src/transformers''' # Matches is_xxx_available() _lowercase : List[str] =re.compile(R'''is\_([a-z_]*)_available()''') # Catches a one-line _import_struct = {xxx} _lowercase : Any =re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] _lowercase : Optional[int] =re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''') # Catches a line if not is_foo_available _lowercase : int =re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''') # Catches a line _import_struct["bla"].append("foo") _lowercase : Tuple =re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] _lowercase : str =re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''') # Catches a line with an object between quotes and a comma: "MyModel", _lowercase : List[Any] =re.compile('''^\s+"([^"]+)",''') # Catches a line with objects between brackets only: ["foo", "bar"], _lowercase : List[Any] =re.compile('''^\s+\[([^\]]+)\]''') # Catches a line with from foo import bar, bla, boo _lowercase : List[str] =re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''') # Catches a line with try: _lowercase : Any =re.compile(R'''^\s*try:''') # Catches a line with else: _lowercase : Optional[int] =re.compile(R'''^\s*else:''') def A__ ( lowercase: int ) -> Optional[Any]: if _re_test_backend.search(lowercase ) is None: return None A : List[str] =[b[0] for b in _re_backend.findall(lowercase )] backends.sort() return "_and_".join(lowercase ) def A__ ( lowercase: Tuple ) -> int: with open(lowercase, 'r', encoding='utf-8', newline='\n' ) as f: A : str =f.readlines() A : List[str] =0 while line_index < len(lowercase ) and not lines[line_index].startswith('_import_structure = {' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(lowercase ): return None # First grab the objects without a specific backend in _import_structure A : Union[str, Any] =[] while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None: A : Union[str, Any] =lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(lowercase ): A : List[str] =_re_one_line_import_struct.search(lowercase ).groups()[0] A : Optional[int] =re.findall('\[([^\]]+)\]', lowercase ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(', ' )] ) line_index += 1 continue A : int =_re_import_struct_key_value.search(lowercase ) if single_line_import_search is not None: A : List[str] =[obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(lowercase ) > 0] objects.extend(lowercase ) elif line.startswith(' ' * 8 + '"' ): objects.append(line[9:-3] ) line_index += 1 A : Optional[int] ={'none': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('if TYPE_CHECKING' ): # If the line is an if not is_backend_available, we grab all objects associated. 
A : Dict =find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: A : int =None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 A : str =[] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ): A : List[Any] =lines[line_index] if _re_import_struct_add_one.search(lowercase ) is not None: objects.append(_re_import_struct_add_one.search(lowercase ).groups()[0] ) elif _re_import_struct_add_many.search(lowercase ) is not None: A : List[str] =_re_import_struct_add_many.search(lowercase ).groups()[0].split(', ' ) A : Optional[Any] =[obj[1:-1] for obj in imports if len(lowercase ) > 0] objects.extend(lowercase ) elif _re_between_brackets.search(lowercase ) is not None: A : int =_re_between_brackets.search(lowercase ).groups()[0].split(', ' ) A : List[str] =[obj[1:-1] for obj in imports if len(lowercase ) > 0] objects.extend(lowercase ) elif _re_quote_object.search(lowercase ) is not None: objects.append(_re_quote_object.search(lowercase ).groups()[0] ) elif line.startswith(' ' * 8 + '"' ): objects.append(line[9:-3] ) elif line.startswith(' ' * 12 + '"' ): objects.append(line[13:-3] ) line_index += 1 A : Optional[Any] =objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend A : int =[] while ( line_index < len(lowercase ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('else' ) ): A : List[str] =lines[line_index] A : Optional[int] =_re_import.search(lowercase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(', ' ) ) elif line.startswith(' ' * 8 ): objects.append(line[8:-2] ) line_index += 1 A : Dict ={'none': objects} # Let's continue with backend-specific objects while line_index < len(lowercase ): # If the line is an if is_backend_available, we grab all objects associated. 
A : Optional[Any] =find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: A : str =None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 A : List[Any] =[] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ): A : List[str] =lines[line_index] A : Optional[Any] =_re_import.search(lowercase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(', ' ) ) elif line.startswith(' ' * 12 ): objects.append(line[12:-2] ) line_index += 1 A : Any =objects else: line_index += 1 return import_dict_objects, type_hint_objects def A__ ( lowercase: Dict, lowercase: str ) -> int: def find_duplicates(lowercase: int ): return [k for k, v in collections.Counter(lowercase ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] A : Dict =[] for key in import_dict_objects.keys(): A : Optional[Any] =find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}' ) A : str =find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): A : Tuple ='base imports' if key == 'none' else F'{key} backend' errors.append(F'Differences for {name}:' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F' {a} in TYPE_HINT but not in _import_structure.' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F' {a} in _import_structure but not in TYPE_HINT.' ) return errors def A__ ( ) -> int: A : List[str] =[] for root, _, files in os.walk(lowercase ): if "__init__.py" in files: A : Optional[int] =os.path.join(lowercase, '__init__.py' ) A : str =parse_init(lowercase ) if objects is not None: A : Union[str, Any] =analyze_results(*lowercase ) if len(lowercase ) > 0: A : Optional[int] =F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}' failures.append('\n'.join(lowercase ) ) if len(lowercase ) > 0: raise ValueError('\n\n'.join(lowercase ) ) def A__ ( ) -> Dict: A : List[Any] =[] for path, directories, files in os.walk(lowercase ): for folder in directories: # Ignore private modules if folder.startswith('_' ): directories.remove(lowercase ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(lowercase ) / folder).glob('*.py' ) ) ) == 0: continue A : List[Any] =str((Path(lowercase ) / folder).relative_to(lowercase ) ) A : Union[str, Any] =short_path.replace(os.path.sep, '.' ) submodules.append(lowercase ) for fname in files: if fname == "__init__.py": continue A : int =str((Path(lowercase ) / fname).relative_to(lowercase ) ) A : str =short_path.replace('.py', '' ).replace(os.path.sep, '.' ) if len(submodule.split('.' ) ) == 1: submodules.append(lowercase ) return submodules _lowercase : Dict =[ '''convert_pytorch_checkpoint_to_tf2''', '''modeling_flax_pytorch_utils''', ] def A__ ( ) -> List[str]: # This is to make sure the transformers module imported is the one in the repo. 
A : Optional[Any] =importlib.util.spec_from_file_location( 'transformers', os.path.join(lowercase, '__init__.py' ), submodule_search_locations=[PATH_TO_TRANSFORMERS], ) A : List[Any] =spec.loader.load_module() A : Union[str, Any] =[ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(lowercase ) > 0: A : List[Any] ='\n'.join(F'- {module}' for module in module_not_registered ) raise ValueError( 'The following submodules are not properly registered in the main init of Transformers:\n' F'{list_of_modules}\n' 'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' ) if __name__ == "__main__": check_all_inits() check_submodules()
305
0
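The init-consistency checker above leans on a small Counter-based duplicate finder; here is a self-contained sketch of that idiom:

import collections

def find_duplicates(items):
    # Keys that occur more than once, as used when comparing _import_structure
    # against the TYPE_CHECKING imports in the checker above.
    return [k for k, v in collections.Counter(items).items() if v > 1]

assert find_duplicates(["PerceiverModel", "PerceiverLayer", "PerceiverModel"]) == ["PerceiverModel"]
assert find_duplicates([]) == []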
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __a = logging.get_logger(__name__) __a = "▁" __a = {"vocab_file": "sentencepiece.bpe.model"} __a = { "vocab_file": { "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model", "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model", "xlm-roberta-large-finetuned-conll02-dutch": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model" ), "xlm-roberta-large-finetuned-conll02-spanish": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model" ), "xlm-roberta-large-finetuned-conll03-english": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model" ), "xlm-roberta-large-finetuned-conll03-german": ( "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model" ), } } __a = { "xlm-roberta-base": 5_12, "xlm-roberta-large": 5_12, "xlm-roberta-large-finetuned-conll02-dutch": 5_12, "xlm-roberta-large-finetuned-conll02-spanish": 5_12, "xlm-roberta-large-finetuned-conll03-english": 5_12, "xlm-roberta-large-finetuned-conll03-german": 5_12, } class lowerCamelCase ( _lowerCAmelCase ): '''simple docstring''' _A : Any = VOCAB_FILES_NAMES _A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP _A : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A : Dict = ["""input_ids""", """attention_mask"""] def __init__( self: List[Any] , snake_case: Tuple , snake_case: List[str]="<s>" , snake_case: Optional[Any]="</s>" , snake_case: Union[str, Any]="</s>" , snake_case: Any="<s>" , snake_case: str="<unk>" , snake_case: int="<pad>" , snake_case: Dict="<mask>" , snake_case: Optional[Dict[str, Any]] = None , **snake_case: Optional[Any] , ) -> None: # Mask token behave like a normal word, i.e. include the space before it snake_case_ :str = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else mask_token snake_case_ :int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , sep_token=snake_case , cls_token=snake_case , pad_token=snake_case , mask_token=snake_case , sp_model_kwargs=self.sp_model_kwargs , **snake_case , ) snake_case_ :List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(snake_case ) ) snake_case_ :List[str] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token snake_case_ :int = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab snake_case_ :List[str] = 1 snake_case_ :List[Any] = len(self.sp_model ) + self.fairseq_offset snake_case_ :List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self: str ) -> Optional[Any]: snake_case_ :Dict = self.__dict__.copy() snake_case_ :Any = None snake_case_ :Optional[int] = self.sp_model.serialized_model_proto() return state def __setstate__( self: Tuple , snake_case: Union[str, Any] ) -> Optional[Any]: snake_case_ :Tuple = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): snake_case_ :Any = {} snake_case_ :str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def lowerCAmelCase_ ( self: Any , snake_case: List[int] , snake_case: Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case_ :List[str] = [self.cls_token_id] snake_case_ :int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase_ ( self: Any , snake_case: List[int] , snake_case: Optional[List[int]] = None , snake_case: bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case ) if token_ids_a is None: return [1] + ([0] * len(snake_case )) + [1] return [1] + ([0] * len(snake_case )) + [1, 1] + ([0] * len(snake_case )) + [1] def lowerCAmelCase_ ( self: Any , snake_case: List[int] , snake_case: Optional[List[int]] = None ) -> List[int]: snake_case_ :List[str] = [self.sep_token_id] snake_case_ :List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowerCAmelCase_ ( self: str ) -> int: return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def lowerCAmelCase_ ( self: Union[str, Any] ) -> Optional[int]: snake_case_ :List[Any] = {self.convert_ids_to_tokens(snake_case ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCAmelCase_ ( self: Optional[Any] , snake_case: str ) -> List[str]: return self.sp_model.encode(snake_case , out_type=snake_case ) def lowerCAmelCase_ ( self: Optional[Any] , snake_case: List[str] ) -> List[str]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] snake_case_ :str = self.sp_model.PieceToId(snake_case ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def lowerCAmelCase_ ( self: int , snake_case: Dict ) -> List[str]: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def lowerCAmelCase_ ( self: str , snake_case: str ) -> Any: snake_case_ :List[str] = """""".join(snake_case ).replace(snake_case , """ """ ).strip() return out_string def lowerCAmelCase_ ( self: Tuple , snake_case: str , snake_case: Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(snake_case ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return snake_case_ 
:List[str] = os.path.join( snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , snake_case ) elif not os.path.isfile(self.vocab_file ): with open(snake_case , """wb""" ) as fi: snake_case_ :int = self.sp_model.serialized_model_proto() fi.write(snake_case ) return (out_vocab_file,)
310
"""simple docstring""" from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { "huggingface/time-series-transformer-tourism-monthly": ( "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json" ), # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer } class lowerCamelCase ( _lowerCAmelCase ): '''simple docstring''' _A : int = """time_series_transformer""" _A : int = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", """num_hidden_layers""": """encoder_layers""", } def __init__( self: Dict , snake_case: Optional[int] = None , snake_case: Optional[int] = None , snake_case: str = "student_t" , snake_case: str = "nll" , snake_case: int = 1 , snake_case: List[int] = [1, 2, 3, 4, 5, 6, 7] , snake_case: Optional[Union[str, bool]] = "mean" , snake_case: int = 0 , snake_case: int = 0 , snake_case: int = 0 , snake_case: int = 0 , snake_case: Optional[List[int]] = None , snake_case: Optional[List[int]] = None , snake_case: int = 32 , snake_case: int = 32 , snake_case: int = 2 , snake_case: int = 2 , snake_case: int = 2 , snake_case: int = 2 , snake_case: bool = True , snake_case: str = "gelu" , snake_case: int = 64 , snake_case: float = 0.1 , snake_case: float = 0.1 , snake_case: float = 0.1 , snake_case: float = 0.1 , snake_case: float = 0.1 , snake_case: int = 100 , snake_case: float = 0.0_2 , snake_case: List[str]=True , **snake_case: List[str] , ) -> Union[str, Any]: # time series specific configuration snake_case_ :Any = prediction_length snake_case_ :Any = context_length or prediction_length snake_case_ :int = distribution_output snake_case_ :Any = loss snake_case_ :List[Any] = input_size snake_case_ :Any = num_time_features snake_case_ :Any = lags_sequence snake_case_ :Any = scaling snake_case_ :Any = num_dynamic_real_features snake_case_ :List[str] = num_static_real_features snake_case_ :List[Any] = num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(snake_case ) != num_static_categorical_features: raise ValueError( """The cardinality should be a list of the same length as `num_static_categorical_features`""" ) snake_case_ :int = cardinality else: snake_case_ :List[str] = [0] if embedding_dimension and num_static_categorical_features > 0: if len(snake_case ) != num_static_categorical_features: raise ValueError( """The embedding dimension should be a list of the same length as `num_static_categorical_features`""" ) snake_case_ :int = embedding_dimension else: snake_case_ :int = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] snake_case_ :Dict = num_parallel_samples # Transformer architecture configuration snake_case_ :Any = input_size * len(snake_case ) + self._number_of_features snake_case_ :Dict = d_model snake_case_ :Optional[Any] = encoder_attention_heads snake_case_ :Tuple = decoder_attention_heads snake_case_ :Any = encoder_ffn_dim snake_case_ :Any = decoder_ffn_dim snake_case_ :Tuple = encoder_layers snake_case_ :int = decoder_layers snake_case_ :Tuple = dropout snake_case_ :Any = attention_dropout snake_case_ :List[str] = activation_dropout snake_case_ :Any = encoder_layerdrop snake_case_ :str = decoder_layerdrop snake_case_ :Union[str, Any] = activation_function snake_case_ :List[str] = init_std snake_case_ :int = use_cache super().__init__(is_encoder_decoder=snake_case , **snake_case ) 
@property def lowerCAmelCase_ ( self: Optional[Any] ) -> int: return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
310
1
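A minimal sketch of instantiating the time-series configuration defined above (illustrative values; requires a transformers version that includes the model):

from transformers import TimeSeriesTransformerConfig

config = TimeSeriesTransformerConfig(
    prediction_length=24,                 # forecast horizon
    context_length=48,                    # falls back to prediction_length when None
    lags_sequence=[1, 2, 3, 4, 5, 6, 7],  # the default lags shown above
    num_time_features=2,
)
# d_model and the attention settings keep their defaults; the model's feature size
# is derived from input_size * len(lags_sequence) plus the static/time features.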
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def lowerCamelCase__ ( __A :Tuple ): """simple docstring""" if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class __snake_case ( nn.Module ): """simple docstring""" def __init__( self , _UpperCamelCase , _UpperCamelCase ) -> Tuple: """simple docstring""" super().__init__() __snake_case = module __snake_case = nn.Sequential( nn.Linear(module.in_features , _UpperCamelCase , bias=_UpperCamelCase ) , nn.Linear(_UpperCamelCase , module.out_features , bias=_UpperCamelCase ) , ) __snake_case = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=_UpperCamelCase ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def a ( self , _UpperCamelCase , *_UpperCamelCase , **_UpperCamelCase ) -> List[str]: """simple docstring""" return self.module(_UpperCamelCase , *_UpperCamelCase , **_UpperCamelCase ) + self.adapter(_UpperCamelCase ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class __snake_case ( unittest.TestCase ): """simple docstring""" __SCREAMING_SNAKE_CASE = "bigscience/bloom-1b7" # Constant values __SCREAMING_SNAKE_CASE = 2.1_09_65_95_52_69_25_74 __SCREAMING_SNAKE_CASE = "Hello my name is" __SCREAMING_SNAKE_CASE = set() EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. 
I" ) EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" ) EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" ) __SCREAMING_SNAKE_CASE = 10 def a ( self ) -> Union[str, Any]: """simple docstring""" __snake_case = AutoTokenizer.from_pretrained(self.model_name ) class __snake_case ( snake_case__ ): """simple docstring""" def a ( self ) -> Union[str, Any]: """simple docstring""" super().setUp() # Models and tokenizer __snake_case = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map="""auto""" ) __snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCamelCase , device_map="""auto""" ) def a ( self ) -> Any: """simple docstring""" del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def a ( self ) -> Tuple: """simple docstring""" __snake_case = self.model_abit.config self.assertTrue(hasattr(_UpperCamelCase , """quantization_config""" ) ) __snake_case = config.to_dict() __snake_case = config.to_diff_dict() __snake_case = config.to_json_string() def a ( self ) -> Dict: """simple docstring""" from bitsandbytes.nn import Paramsabit __snake_case = self.model_fpaa.get_memory_footprint() __snake_case = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) __snake_case = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def a ( self ) -> Any: """simple docstring""" from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(_UpperCamelCase , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def a ( self ) -> List[str]: """simple docstring""" __snake_case = self.tokenizer(self.input_text , return_tensors="""pt""" ) __snake_case = self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCamelCase ) , self.EXPECTED_OUTPUTS ) def a ( self ) -> Union[str, Any]: """simple docstring""" __snake_case = BitsAndBytesConfig() __snake_case = True __snake_case = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=_UpperCamelCase , device_map="""auto""" ) __snake_case = self.tokenizer(self.input_text , return_tensors="""pt""" ) __snake_case = model_abit_from_config.generate( input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCamelCase ) , self.EXPECTED_OUTPUTS ) def a ( self ) -> int: """simple docstring""" with self.assertRaises(_UpperCamelCase ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(_UpperCamelCase ) def a ( self ) -> Optional[Any]: """simple docstring""" __snake_case = BitsAndBytesConfig() with self.assertRaises(_UpperCamelCase ): __snake_case = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=_UpperCamelCase , load_in_abit=_UpperCamelCase , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , ) def a ( self ) -> List[Any]: """simple docstring""" with self.assertRaises(_UpperCamelCase ): # Tries with `str` self.model_abit.to("""cpu""" ) with 
self.assertRaises(_UpperCamelCase ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(_UpperCamelCase ): # Tries with a `device` self.model_abit.to(torch.device("""cuda:0""" ) ) with self.assertRaises(_UpperCamelCase ): # Tries with a `device` self.model_abit.float() with self.assertRaises(_UpperCamelCase ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything __snake_case = self.tokenizer(self.input_text , return_tensors="""pt""" ) __snake_case = self.model_fpaa.to(torch.floataa ) __snake_case = self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error __snake_case = self.model_fpaa.to("""cpu""" ) # Check this does not throw an error __snake_case = self.model_fpaa.half() # Check this does not throw an error __snake_case = self.model_fpaa.float() def a ( self ) -> Dict: """simple docstring""" __snake_case = AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=_UpperCamelCase , device_map="""auto""" ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class __snake_case ( unittest.TestCase ): """simple docstring""" @classmethod def a ( cls ) -> List[str]: """simple docstring""" __snake_case = """t5-small""" __snake_case = """google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense __snake_case = AutoTokenizer.from_pretrained(cls.model_name ) __snake_case = """Translate in German: Hello, my dog is cute""" def a ( self ) -> Tuple: """simple docstring""" gc.collect() torch.cuda.empty_cache() def a ( self ) -> Dict: """simple docstring""" from transformers import TaForConditionalGeneration __snake_case = TaForConditionalGeneration._keep_in_fpaa_modules __snake_case = None # test with `t5-small` __snake_case = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCamelCase , device_map="""auto""" ) __snake_case = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) __snake_case = model.generate(**_UpperCamelCase ) # test with `flan-t5-small` __snake_case = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=_UpperCamelCase , device_map="""auto""" ) __snake_case = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) __snake_case = model.generate(**_UpperCamelCase ) __snake_case = modules def a ( self ) -> Tuple: """simple docstring""" import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` __snake_case = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCamelCase , device_map="""auto""" ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) __snake_case = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) __snake_case = model.generate(**_UpperCamelCase ) # test with `flan-t5-small` __snake_case = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=_UpperCamelCase , device_map="""auto""" ) __snake_case = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 ) __snake_case = model.generate(**_UpperCamelCase ) class __snake_case ( snake_case__ ): """simple docstring""" def a ( self ) -> Optional[Any]: """simple docstring""" super().setUp() # model_name __snake_case = 
"""bigscience/bloom-560m""" __snake_case = """t5-small""" # Different types of model __snake_case = AutoModel.from_pretrained(self.model_name , load_in_abit=_UpperCamelCase , device_map="""auto""" ) # Sequence classification model __snake_case = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=_UpperCamelCase , device_map="""auto""" ) # CausalLM model __snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCamelCase , device_map="""auto""" ) # Seq2seq model __snake_case = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=_UpperCamelCase , device_map="""auto""" ) def a ( self ) -> Dict: """simple docstring""" del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def a ( self ) -> List[str]: """simple docstring""" from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class __snake_case ( snake_case__ ): """simple docstring""" def a ( self ) -> Union[str, Any]: """simple docstring""" super().setUp() def a ( self ) -> Union[str, Any]: """simple docstring""" del self.pipe gc.collect() torch.cuda.empty_cache() def a ( self ) -> Tuple: """simple docstring""" __snake_case = pipeline( """text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass __snake_case = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class __snake_case ( snake_case__ ): """simple docstring""" def a ( self ) -> Tuple: """simple docstring""" super().setUp() def a ( self ) -> Union[str, Any]: """simple docstring""" __snake_case = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=_UpperCamelCase , device_map="""balanced""" ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model __snake_case = self.tokenizer(self.input_text , return_tensors="""pt""" ) # Second real batch __snake_case = model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=_UpperCamelCase ) , self.EXPECTED_OUTPUTS ) class __snake_case ( snake_case__ ): """simple docstring""" def a ( self ) -> List[Any]: """simple docstring""" __snake_case = """facebook/opt-350m""" super().setUp() def a ( self ) -> Optional[int]: """simple docstring""" if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ): return # Step 1: freeze all parameters __snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCamelCase ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): __snake_case = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. 
layernorm) to fp32 for stability __snake_case = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(_UpperCamelCase ) ): __snake_case = LoRALayer(module.q_proj , rank=16 ) __snake_case = LoRALayer(module.k_proj , rank=16 ) __snake_case = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch __snake_case = self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): __snake_case = model.forward(**_UpperCamelCase ) out.logits.norm().backward() for module in model.modules(): if isinstance(_UpperCamelCase , _UpperCamelCase ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(_UpperCamelCase , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class __snake_case ( snake_case__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = "gpt2-xl" __SCREAMING_SNAKE_CASE = 3.31_91_85_48_54_15_21_87
268
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path import torch from ...utils import is_npu_available, is_xpu_available from .config_args import ClusterConfig, default_json_config_file from .config_utils import SubcommandHelpFormatter UpperCamelCase__ = '''Create a default config file for Accelerate with only a few flags set.''' def lowerCamelCase__ ( __A :List[Any]="no" ,__A :str = default_json_config_file ,__A :bool = False ): """simple docstring""" __snake_case = Path(__A ) path.parent.mkdir(parents=__A ,exist_ok=__A ) if path.exists(): print( F'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.' ) return False __snake_case = mixed_precision.lower() if mixed_precision not in ["no", "fp16", "bf16", "fp8"]: raise ValueError( F'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}' ) __snake_case = { """compute_environment""": """LOCAL_MACHINE""", """mixed_precision""": mixed_precision, } if torch.cuda.is_available(): __snake_case = torch.cuda.device_count() __snake_case = num_gpus __snake_case = False if num_gpus > 1: __snake_case = """MULTI_GPU""" else: __snake_case = """NO""" elif is_xpu_available() and use_xpu: __snake_case = torch.xpu.device_count() __snake_case = num_xpus __snake_case = False if num_xpus > 1: __snake_case = """MULTI_XPU""" else: __snake_case = """NO""" elif is_npu_available(): __snake_case = torch.npu.device_count() __snake_case = num_npus __snake_case = False if num_npus > 1: __snake_case = """MULTI_NPU""" else: __snake_case = """NO""" else: __snake_case = 0 __snake_case = True __snake_case = 1 __snake_case = """NO""" __snake_case = ClusterConfig(**__A ) config.to_json_file(__A ) return path def lowerCamelCase__ ( __A :Dict ,__A :List[Any] ): """simple docstring""" __snake_case = parser.add_parser("""default""" ,parents=__A ,help=__A ,formatter_class=__A ) parser.add_argument( """--config_file""" ,default=__A ,help=( """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """ """location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """ """such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """ """with 'huggingface'.""" ) ,dest="""save_location""" ,) parser.add_argument( """--mixed_precision""" ,choices=["""no""", """fp16""", """bf16"""] ,type=__A ,help="""Whether or not to use mixed precision training. """ """Choose between FP16 and BF16 (bfloat16) training. 
""" """BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.""" ,default="""no""" ,) parser.set_defaults(func=__A ) return parser def lowerCamelCase__ ( __A :Optional[Any] ): """simple docstring""" __snake_case = write_basic_config(args.mixed_precision ,args.save_location ) if config_file: print(F'accelerate configuration saved at {config_file}' )
268
1
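A minimal sketch of the 4-bit loading path the tests above exercise (needs a CUDA GPU plus the bitsandbytes and accelerate packages; the model name matches the test constant):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",            # the quant type the tests pass explicitly
    bnb_4bit_compute_dtype=torch.float16,
)
model = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloom-1b7", quantization_config=quant_config, device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-1b7")
inputs = tokenizer("Hello my name is", return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=10)[0]))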
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_perceiver"] = [
        "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PerceiverForImageClassificationConvProcessing",
        "PerceiverForImageClassificationFourier",
        "PerceiverForImageClassificationLearned",
        "PerceiverForMaskedLM",
        "PerceiverForMultimodalAutoencoding",
        "PerceiverForOpticalFlow",
        "PerceiverForSequenceClassification",
        "PerceiverLayer",
        "PerceiverModel",
        "PerceiverPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
434
'''simple docstring''' import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging a__ : Optional[int] =logging.get_logger(__name__) a__ : int ={ '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''', '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''', } class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any ="xlnet" SCREAMING_SNAKE_CASE_ : List[str] =["mems"] SCREAMING_SNAKE_CASE_ : Dict ={ "n_token": "vocab_size", # Backward compatibility "hidden_size": "d_model", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : List[Any] , __A : Optional[Any]=3_2_0_0_0 , __A : int=1_0_2_4 , __A : Tuple=2_4 , __A : Dict=1_6 , __A : str=4_0_9_6 , __A : List[str]="gelu" , __A : int=True , __A : str="bi" , __A : List[str]=0.02 , __A : List[Any]=1e-12 , __A : Optional[Any]=0.1 , __A : str=5_1_2 , __A : Any=None , __A : str=True , __A : Dict=False , __A : str=False , __A : Tuple=-1 , __A : List[Any]=False , __A : str="last" , __A : Optional[Any]=True , __A : Optional[int]="tanh" , __A : Any=0.1 , __A : List[str]=5 , __A : Tuple=5 , __A : Dict=5 , __A : str=1 , __A : Optional[Any]=2 , **__A : List[Any] , ): __UpperCamelCase = vocab_size __UpperCamelCase = d_model __UpperCamelCase = n_layer __UpperCamelCase = n_head if d_model % n_head != 0: raise ValueError(f'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( f'''`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})''' ) __UpperCamelCase = d_model // n_head __UpperCamelCase = ff_activation __UpperCamelCase = d_inner __UpperCamelCase = untie_r __UpperCamelCase = attn_type __UpperCamelCase = initializer_range __UpperCamelCase = layer_norm_eps __UpperCamelCase = dropout __UpperCamelCase = mem_len __UpperCamelCase = reuse_len __UpperCamelCase = bi_data __UpperCamelCase = clamp_len __UpperCamelCase = same_length __UpperCamelCase = summary_type __UpperCamelCase = summary_use_proj __UpperCamelCase = summary_activation __UpperCamelCase = summary_last_dropout __UpperCamelCase = start_n_top __UpperCamelCase = end_n_top __UpperCamelCase = bos_token_id __UpperCamelCase = pad_token_id __UpperCamelCase = eos_token_id if "use_cache" in kwargs: warnings.warn( 'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`' ' instead.' , __A , ) __UpperCamelCase = kwargs['use_cache'] __UpperCamelCase = use_mems_eval __UpperCamelCase = use_mems_train super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) @property def _lowerCamelCase ( self : List[str] ): logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def _lowerCamelCase ( self : int , __A : Optional[int] ): # Message copied from Transformer-XL documentation raise NotImplementedError( f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
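A small illustration of the attribute map declared at the top of the config above: the generic Transformers names alias XLNet's own fields. The keyword names come from the signature in this file; the aliasing behavior shown is the standard `PretrainedConfig` mechanism.

from transformers import XLNetConfig

config = XLNetConfig(d_model=512, n_head=8, n_layer=6)
# attribute_map routes the generic names onto XLNet's native fields:
assert config.hidden_size == config.d_model == 512
assert config.num_attention_heads == config.n_head == 8
assert config.num_hidden_layers == config.n_layer == 6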
434
1
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __a : Any = logging.get_logger(__name__) __a : Tuple = { """microsoft/unispeech-sat-base-100h-libri-ft""": ( """https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json""" ), # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat } class _UpperCamelCase ( _UpperCAmelCase ): """simple docstring""" __a : int = '''unispeech-sat''' def __init__( self , lowerCAmelCase__=32 , lowerCAmelCase__=7_68 , lowerCAmelCase__=12 , lowerCAmelCase__=12 , lowerCAmelCase__=30_72 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-5 , lowerCAmelCase__="group" , lowerCAmelCase__="gelu" , lowerCAmelCase__=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , lowerCAmelCase__=(5, 2, 2, 2, 2, 2, 2) , lowerCAmelCase__=(10, 3, 3, 3, 3, 2, 2) , lowerCAmelCase__=False , lowerCAmelCase__=1_28 , lowerCAmelCase__=16 , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=0.05 , lowerCAmelCase__=10 , lowerCAmelCase__=2 , lowerCAmelCase__=0.0 , lowerCAmelCase__=10 , lowerCAmelCase__=0 , lowerCAmelCase__=3_20 , lowerCAmelCase__=2 , lowerCAmelCase__=0.1 , lowerCAmelCase__=1_00 , lowerCAmelCase__=2_56 , lowerCAmelCase__=2_56 , lowerCAmelCase__=0.1 , lowerCAmelCase__="mean" , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=2_56 , lowerCAmelCase__=(5_12, 5_12, 5_12, 5_12, 15_00) , lowerCAmelCase__=(5, 3, 3, 1, 1) , lowerCAmelCase__=(1, 2, 3, 1, 1) , lowerCAmelCase__=5_12 , lowerCAmelCase__=0 , lowerCAmelCase__=1 , lowerCAmelCase__=2 , lowerCAmelCase__=5_04 , **lowerCAmelCase__ , ) -> Any: '''simple docstring''' super().__init__(**lowerCAmelCase__ , pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ ) __lowercase = hidden_size __lowercase = feat_extract_norm __lowercase = feat_extract_activation __lowercase = list(lowerCAmelCase__ ) __lowercase = list(lowerCAmelCase__ ) __lowercase = list(lowerCAmelCase__ ) __lowercase = conv_bias __lowercase = num_conv_pos_embeddings __lowercase = num_conv_pos_embedding_groups __lowercase = len(self.conv_dim ) __lowercase = num_hidden_layers __lowercase = intermediate_size __lowercase = hidden_act __lowercase = num_attention_heads __lowercase = hidden_dropout __lowercase = attention_dropout __lowercase = activation_dropout __lowercase = feat_proj_dropout __lowercase = final_dropout __lowercase = layerdrop __lowercase = layer_norm_eps __lowercase = initializer_range __lowercase = vocab_size __lowercase = num_clusters __lowercase = do_stable_layer_norm __lowercase = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`," F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." 
) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __lowercase = apply_spec_augment __lowercase = mask_time_prob __lowercase = mask_time_length __lowercase = mask_time_min_masks __lowercase = mask_feature_prob __lowercase = mask_feature_length __lowercase = mask_feature_min_masks # parameters for pretraining with codevector quantized representations __lowercase = num_codevectors_per_group __lowercase = num_codevector_groups __lowercase = contrastive_logits_temperature __lowercase = feat_quantizer_dropout __lowercase = num_negatives __lowercase = codevector_dim __lowercase = proj_codevector_dim __lowercase = diversity_loss_weight # ctc loss __lowercase = ctc_loss_reduction __lowercase = ctc_zero_infinity # SequenceClassification-specific parameter. Feel free to ignore for other classes. __lowercase = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. __lowercase = list(lowerCAmelCase__ ) __lowercase = list(lowerCAmelCase__ ) __lowercase = list(lowerCAmelCase__ ) __lowercase = xvector_output_dim @property def _SCREAMING_SNAKE_CASE ( self ) -> Any: '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
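The closing property above multiplies the convolution strides together with `functools.reduce`. With the default strides from the signature, that product is the feature extractor's total downsampling factor, worked out by hand:

import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # defaults from the signature above
ratio = functools.reduce(operator.mul, conv_stride, 1)
print(ratio)  # 320 -> one output frame per 320 input samples (20 ms at 16 kHz)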
534
from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import ScoreSdeVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class _UpperCamelCase ( _UpperCAmelCase ): """simple docstring""" __a : UNetaDModel __a : ScoreSdeVeScheduler def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]: '''simple docstring''' super().__init__() self.register_modules(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ ) @torch.no_grad() def __call__( self , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 20_00 , lowerCAmelCase__ = None , lowerCAmelCase__ = "pil" , lowerCAmelCase__ = True , **lowerCAmelCase__ , ) -> Union[ImagePipelineOutput, Tuple]: '''simple docstring''' __lowercase = self.unet.config.sample_size __lowercase = (batch_size, 3, img_size, img_size) __lowercase = self.unet __lowercase = randn_tensor(lowerCAmelCase__ , generator=lowerCAmelCase__ ) * self.scheduler.init_noise_sigma __lowercase = sample.to(self.device ) self.scheduler.set_timesteps(lowerCAmelCase__ ) self.scheduler.set_sigmas(lowerCAmelCase__ ) for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): __lowercase = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device ) # correction step for _ in range(self.scheduler.config.correct_steps ): __lowercase = self.unet(lowerCAmelCase__ , lowerCAmelCase__ ).sample __lowercase = self.scheduler.step_correct(lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ).prev_sample # prediction step __lowercase = model(lowerCAmelCase__ , lowerCAmelCase__ ).sample __lowercase = self.scheduler.step_pred(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ) __lowercase , __lowercase = output.prev_sample, output.prev_sample_mean __lowercase = sample_mean.clamp(0 , 1 ) __lowercase = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": __lowercase = self.numpy_to_pil(lowerCAmelCase__ ) if not return_dict: return (sample,) return ImagePipelineOutput(images=lowerCAmelCase__ )
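A hedged usage sketch for the SDE-VE pipeline above. The checkpoint name is an assumption chosen for illustration; the call mirrors the `__call__` signature defined in this file (batch size, number of inference steps, generator, output type).

import torch
from diffusers import ScoreSdeVePipeline

pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
generator = torch.manual_seed(0)
image = pipe(num_inference_steps=2000, generator=generator).images[0]
image.save("sde_ve_sample.png")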
534
1
import gc import inspect import unittest import torch from parameterized import parameterized from diffusers import PriorTransformer from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin enable_full_determinism() class lowercase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): A__= PriorTransformer A__= 'hidden_states' @property def _UpperCAmelCase ( self : Dict ): """simple docstring""" UpperCAmelCase__ = 4 UpperCAmelCase__ = 8 UpperCAmelCase__ = 7 UpperCAmelCase__ = floats_tensor((batch_size, embedding_dim) ).to(_lowercase ) UpperCAmelCase__ = floats_tensor((batch_size, embedding_dim) ).to(_lowercase ) UpperCAmelCase__ = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(_lowercase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def _UpperCAmelCase ( self : Union[str, Any] , _lowercase : List[Any]=0 ): """simple docstring""" torch.manual_seed(_lowercase ) UpperCAmelCase__ = 4 UpperCAmelCase__ = 8 UpperCAmelCase__ = 7 UpperCAmelCase__ = torch.randn((batch_size, embedding_dim) ).to(_lowercase ) UpperCAmelCase__ = torch.randn((batch_size, embedding_dim) ).to(_lowercase ) UpperCAmelCase__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_lowercase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } @property def _UpperCAmelCase ( self : str ): """simple docstring""" return (4, 8) @property def _UpperCAmelCase ( self : Tuple ): """simple docstring""" return (4, 8) def _UpperCAmelCase ( self : int ): """simple docstring""" UpperCAmelCase__ = { "num_attention_heads": 2, "attention_head_dim": 4, "num_layers": 2, "embedding_dim": 8, "num_embeddings": 7, "additional_embeddings": 4, } UpperCAmelCase__ = self.dummy_input return init_dict, inputs_dict def _UpperCAmelCase ( self : Tuple ): """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ = PriorTransformer.from_pretrained( "hf-internal-testing/prior-dummy" , output_loading_info=_lowercase ) self.assertIsNotNone(_lowercase ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(_lowercase ) UpperCAmelCase__ = model(**self.dummy_input )[0] assert hidden_states is not None, "Make sure output is not None" def _UpperCAmelCase ( self : int ): """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ = self.prepare_init_args_and_inputs_for_common() UpperCAmelCase__ = self.model_class(**_lowercase ) UpperCAmelCase__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ = [*signature.parameters.keys()] UpperCAmelCase__ = ["hidden_states", "timestep"] self.assertListEqual(arg_names[:2] , _lowercase ) def _UpperCAmelCase ( self : List[Any] ): """simple docstring""" UpperCAmelCase__ = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy" ) UpperCAmelCase__ = model.to(_lowercase ) if hasattr(_lowercase , "set_default_attn_processor" ): model.set_default_attn_processor() UpperCAmelCase__ = self.get_dummy_seed_input() with torch.no_grad(): UpperCAmelCase__ = model(**_lowercase )[0] UpperCAmelCase__ = output[0, :5].flatten().cpu() print(_lowercase ) # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. 
UpperCAmelCase__ = torch.tensor([-1.3_4_3_6, -0.2_8_7_0, 0.7_5_3_8, 0.4_3_6_8, -0.0_2_3_9] ) self.assertTrue(torch_all_close(_lowercase , _lowercase , rtol=1E-2 ) ) @slow class lowercase__ ( unittest.TestCase ): def _UpperCAmelCase ( self : List[Any] , _lowercase : Optional[Any]=1 , _lowercase : List[Any]=7_68 , _lowercase : Dict=77 , _lowercase : Dict=0 ): """simple docstring""" torch.manual_seed(_lowercase ) UpperCAmelCase__ = batch_size UpperCAmelCase__ = embedding_dim UpperCAmelCase__ = num_embeddings UpperCAmelCase__ = torch.randn((batch_size, embedding_dim) ).to(_lowercase ) UpperCAmelCase__ = torch.randn((batch_size, embedding_dim) ).to(_lowercase ) UpperCAmelCase__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_lowercase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def _UpperCAmelCase ( self : List[str] ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() @parameterized.expand( [ # fmt: off [13, [-0.5_8_6_1, 0.1_2_8_3, -0.0_9_3_1, 0.0_8_8_2, 0.4_4_7_6, 0.1_3_2_9, -0.0_4_9_8, 0.0_6_4_0]], [37, [-0.4_9_1_3, 0.0_1_1_0, -0.0_4_8_3, 0.0_5_4_1, 0.4_9_5_4, -0.0_1_7_0, 0.0_3_5_4, 0.1_6_5_1]], # fmt: on ] ) def _UpperCAmelCase ( self : List[Any] , _lowercase : Any , _lowercase : List[str] ): """simple docstring""" UpperCAmelCase__ = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior" , subfolder="prior" ) model.to(_lowercase ) UpperCAmelCase__ = self.get_dummy_seed_input(seed=_lowercase ) with torch.no_grad(): UpperCAmelCase__ = model(**_lowercase )[0] assert list(sample.shape ) == [1, 7_68] UpperCAmelCase__ = sample[0, :8].flatten().cpu() print(_lowercase ) UpperCAmelCase__ = torch.tensor(_lowercase ) assert torch_all_close(_lowercase , _lowercase , atol=1E-3 )
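The tests above lean on one recurring pattern: seed the generator, run the model, then compare a small flattened slice of the output against hard-coded reference values within a tolerance. In isolation the pattern looks like this sketch; the reference values here are placeholders, not taken from any model.

import torch

torch.manual_seed(0)
output = torch.randn(1, 768)
expected_slice = output[0, :5].clone()  # captured once from a trusted run
assert torch.allclose(output[0, :5], expected_slice, atol=1e-3)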
277
import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch A = logging.get_logger(__name__) class lowercase__ : def __init__( self : List[str] , _lowercase : str = None , _lowercase : uuid.UUID = None , _lowercase : Dict=None , _lowercase : Any=None ): """simple docstring""" if not conversation_id: UpperCAmelCase__ = uuid.uuida() if past_user_inputs is None: UpperCAmelCase__ = [] if generated_responses is None: UpperCAmelCase__ = [] UpperCAmelCase__ = conversation_id UpperCAmelCase__ = past_user_inputs UpperCAmelCase__ = generated_responses UpperCAmelCase__ = text def __eq__( self : Any , _lowercase : str ): """simple docstring""" if not isinstance(_lowercase , _lowercase ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def _UpperCAmelCase ( self : Any , _lowercase : str , _lowercase : bool = False ): """simple docstring""" if self.new_user_input: if overwrite: logger.warning( F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """ F"""with: \"{text}\".""" ) UpperCAmelCase__ = text else: logger.warning( F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """ F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" ) else: UpperCAmelCase__ = text def _UpperCAmelCase ( self : int ): """simple docstring""" if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) UpperCAmelCase__ = None def _UpperCAmelCase ( self : List[str] , _lowercase : str ): """simple docstring""" self.generated_responses.append(_lowercase ) def _UpperCAmelCase ( self : List[str] ): """simple docstring""" for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self : List[Any] ): """simple docstring""" UpperCAmelCase__ = F"""Conversation id: {self.uuid} \n""" for is_user, text in self.iter_texts(): UpperCAmelCase__ = "user" if is_user else "bot" output += F"""{name} >> {text} \n""" return output @add_end_docstrings( __SCREAMING_SNAKE_CASE , R'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ' , ) class lowercase__ ( __SCREAMING_SNAKE_CASE ): def __init__( self : str , *_lowercase : Union[str, Any] , **_lowercase : Any ): """simple docstring""" super().__init__(*_lowercase , **_lowercase ) if self.tokenizer.pad_token_id is None: UpperCAmelCase__ = self.tokenizer.eos_token def _UpperCAmelCase ( self : List[str] , _lowercase : int=None , _lowercase : Any=None , _lowercase : List[str]=None , **_lowercase : Dict ): """simple docstring""" UpperCAmelCase__ = {} UpperCAmelCase__ = {} UpperCAmelCase__ = {} if min_length_for_response is not None: UpperCAmelCase__ = min_length_for_response if minimum_tokens is not None: UpperCAmelCase__ = minimum_tokens if "max_length" in generate_kwargs: UpperCAmelCase__ = generate_kwargs["max_length"] # self.max_length = 
generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: UpperCAmelCase__ = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(_lowercase ) return preprocess_params, forward_params, postprocess_params def __call__( self : Dict , _lowercase : Union[Conversation, List[Conversation]] , _lowercase : List[str]=0 , **_lowercase : Optional[int] ): """simple docstring""" UpperCAmelCase__ = super().__call__(_lowercase , num_workers=_lowercase , **_lowercase ) if isinstance(_lowercase , _lowercase ) and len(_lowercase ) == 1: return outputs[0] return outputs def _UpperCAmelCase ( self : Optional[Any] , _lowercase : Conversation , _lowercase : List[Any]=32 ): """simple docstring""" if not isinstance(_lowercase , _lowercase ): raise ValueError("ConversationalPipeline, expects Conversation as inputs" ) if conversation.new_user_input is None: raise ValueError( F"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """ "Add user inputs with the conversation's `add_user_input` method" ) if hasattr(self.tokenizer , "_build_conversation_input_ids" ): UpperCAmelCase__ = self.tokenizer._build_conversation_input_ids(_lowercase ) else: # If the tokenizer cannot handle conversations, we default to only the old version UpperCAmelCase__ = self._legacy_parse_and_tokenize(_lowercase ) if self.framework == "pt": UpperCAmelCase__ = torch.LongTensor([input_ids] ) elif self.framework == "tf": UpperCAmelCase__ = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def _UpperCAmelCase ( self : Optional[Any] , _lowercase : List[Any] , _lowercase : List[Any]=10 , **_lowercase : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = generate_kwargs.get("max_length" , self.model.config.max_length ) UpperCAmelCase__ = model_inputs["input_ids"].shape[1] if max_length - minimum_tokens < n: logger.warning(F"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" ) UpperCAmelCase__ = max_length - minimum_tokens UpperCAmelCase__ = model_inputs["input_ids"][:, -trim:] if "attention_mask" in model_inputs: UpperCAmelCase__ = model_inputs["attention_mask"][:, -trim:] UpperCAmelCase__ = model_inputs.pop("conversation" ) UpperCAmelCase__ = max_length UpperCAmelCase__ = self.model.generate(**_lowercase , **_lowercase ) if self.model.config.is_encoder_decoder: UpperCAmelCase__ = 1 else: UpperCAmelCase__ = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def _UpperCAmelCase ( self : Tuple , _lowercase : Union[str, Any] , _lowercase : Optional[Any]=True ): """simple docstring""" UpperCAmelCase__ = model_outputs["output_ids"] UpperCAmelCase__ = self.tokenizer.decode( output_ids[0] , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , ) UpperCAmelCase__ = model_outputs["conversation"] conversation.mark_processed() conversation.append_response(_lowercase ) return conversation def _UpperCAmelCase ( self : List[str] , _lowercase : Conversation ): """simple docstring""" UpperCAmelCase__ = self.tokenizer.eos_token_id UpperCAmelCase__ = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) ) if len(_lowercase ) > self.tokenizer.model_max_length: UpperCAmelCase__ = 
input_ids[-self.tokenizer.model_max_length :] return input_ids
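A hedged usage sketch for the conversational pipeline above; the model name is an assumption chosen for illustration. The `Conversation` object accumulates user inputs and generated responses across turns, which is what the `mark_processed`/`append_response` calls in `postprocess` maintain.

from transformers import Conversation, pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
conversation = Conversation("What's a good way to learn Python?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])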
277
1
from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional from packaging import version if TYPE_CHECKING: from ... import PreTrainedTokenizer, TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import is_torch_available, logging __snake_case :Any =logging.get_logger(__name__) __snake_case :List[Any] ={ 'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json', 'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json', 'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json', 'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json', 'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json', 'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json', } class lowerCAmelCase__ ( _lowerCamelCase ): A_ : int = 'bloom' A_ : Any = ['past_key_values'] A_ : Dict = { 'num_hidden_layers': 'n_layer', 'num_attention_heads': 'n_head', } def __init__( self : Any , __UpperCamelCase : Optional[int]=250_880 , __UpperCamelCase : List[str]=64 , __UpperCamelCase : Tuple=2 , __UpperCamelCase : Optional[int]=8 , __UpperCamelCase : str=1e-5 , __UpperCamelCase : Union[str, Any]=0.0_2 , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Union[str, Any]=1 , __UpperCamelCase : List[Any]=2 , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : List[Any]=0.0 , __UpperCamelCase : int=0.0 , __UpperCamelCase : Optional[Any]=1 , __UpperCamelCase : List[Any]=False , **__UpperCamelCase : Dict , ) -> Any: A = vocab_size # Backward compatibility with n_embed kwarg A = kwargs.pop('n_embed' , __UpperCamelCase ) A = hidden_size if n_embed is None else n_embed A = n_layer A = n_head A = layer_norm_epsilon A = initializer_range A = use_cache A = pretraining_tp A = apply_residual_connection_post_layernorm A = hidden_dropout A = attention_dropout A = bos_token_id A = eos_token_id A = slow_but_exact super().__init__(bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase ) class lowerCAmelCase__ ( _lowerCamelCase ): A_ : str = version.parse('1.12' ) def __init__( self : Optional[int] , __UpperCamelCase : PretrainedConfig , __UpperCamelCase : str = "default" , __UpperCamelCase : List[PatchingSpec] = None , __UpperCamelCase : bool = False , ) -> Any: super().__init__(__UpperCamelCase , task=__UpperCamelCase , patching_specs=__UpperCamelCase , use_past=__UpperCamelCase ) if not getattr(self._config , 'pad_token_id' , __UpperCamelCase ): # TODO: how to do that better? A = 0 @property def __UpperCamelCase ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]: A = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: # BLOOM stores values on dynamic axis 2. 
For more details see: https://github.com/huggingface/transformers/pull/18344 self.fill_with_past_key_values_(__UpperCamelCase , direction='inputs' , inverted_values_shape=__UpperCamelCase ) A = {0: 'batch', 1: 'past_sequence + sequence'} else: A = {0: 'batch', 1: 'sequence'} return common_inputs @property def __UpperCamelCase ( self : int ) -> int: return self._config.n_layer @property def __UpperCamelCase ( self : Union[str, Any] ) -> int: return self._config.n_head @property def __UpperCamelCase ( self : str ) -> float: return 1e-3 def __UpperCamelCase ( self : List[str] , __UpperCamelCase : "PreTrainedTokenizer" , __UpperCamelCase : int = -1 , __UpperCamelCase : int = -1 , __UpperCamelCase : bool = False , __UpperCamelCase : Optional["TensorType"] = None , ) -> Mapping[str, Any]: A = super(__UpperCamelCase , self ).generate_dummy_inputs( __UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase ) # We need to order the input in the way they appears in the forward() A = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch A , A = common_inputs['input_ids'].shape # Not using the same length for past_key_values A = seqlen + 2 A = self._config.hidden_size // self.num_attention_heads A = ( batch * self.num_attention_heads, head_dim, past_key_values_length, ) A = ( batch * self.num_attention_heads, past_key_values_length, head_dim, ) A = [ (torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) for _ in range(self.num_layers ) ] A = common_inputs['attention_mask'] if self.use_past: A = ordered_inputs['attention_mask'].dtype A = torch.cat( [ordered_inputs['attention_mask'], torch.ones(__UpperCamelCase , __UpperCamelCase , dtype=__UpperCamelCase )] , dim=1 ) return ordered_inputs @property def __UpperCamelCase ( self : Optional[Any] ) -> int: return 13
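A worked example of the past key/value shapes built in `generate_dummy_inputs` above: BLOOM flattens batch and heads into one leading dimension, with keys and values transposed relative to each other. The numbers below are illustrative assumptions, not BLOOM defaults.

batch, n_head, head_dim, past_len = 2, 8, 64, 10

past_key_shape = (batch * n_head, head_dim, past_len)    # keys:   (B*H, D, L)
past_value_shape = (batch * n_head, past_len, head_dim)  # values: (B*H, L, D)
print(past_key_shape, past_value_shape)  # (16, 64, 10) (16, 10, 64)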
106
"""Validate credit card numbers with prefix, length, and Luhn checks."""


def validate_initial_digits(credit_card_number: str) -> bool:
    # Valid cards start with 34, 35, 37, 4, 5 or 6.
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):  # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling a number results in a two digit number,
        # i.e. greater than 9 (e.g., 6 x 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits.
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
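A step-by-step walk through the Luhn check above on one of its test numbers, using the equivalent subtract-9 shortcut for the two-digit case:

number = "4111111111111111"
digits = [int(d) for d in number]
# Double every second digit from the right; a double above 9 loses 9,
# which matches the digit-sum trick in the code (12 -> 1 + 2 = 3 = 12 - 9).
for i in range(len(digits) - 2, -1, -2):
    digits[i] *= 2
    if digits[i] > 9:
        digits[i] -= 9
print(sum(digits) % 10 == 0)  # True -> passes the Luhn check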
11
0
"""simple docstring""" import os from typing import Dict, List, Tuple, TypeVar, Union lowercase__ : List[Any] = TypeVar('''T''') lowercase__ : Optional[int] = Union[List[T], Tuple[T, ...]] lowercase__ : Optional[int] = Union[T, List[T], Dict[str, T]] lowercase__ : Optional[int] = Union[str, bytes, os.PathLike]
485
"""simple docstring""" import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) lowercase__ : List[str] = { '''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''', '''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''', '''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''', '''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''', '''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''', '''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''', '''mask_downscaling.0''': '''mask_embed.conv1''', '''mask_downscaling.1''': '''mask_embed.layer_norm1''', '''mask_downscaling.3''': '''mask_embed.conv2''', '''mask_downscaling.4''': '''mask_embed.layer_norm2''', '''mask_downscaling.6''': '''mask_embed.conv3''', '''point_embeddings''': '''point_embed''', '''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''', '''image_encoder''': '''vision_encoder''', '''neck.0''': '''neck.conv1''', '''neck.1''': '''neck.layer_norm1''', '''neck.2''': '''neck.conv2''', '''neck.3''': '''neck.layer_norm2''', '''patch_embed.proj''': '''patch_embed.projection''', '''.norm''': '''.layer_norm''', '''blocks''': '''layers''', } def __lowercase ( _a ): snake_case_ : List[str] = {} state_dict.pop('''pixel_mean''' , _a ) state_dict.pop('''pixel_std''' , _a ) snake_case_ : Union[str, Any] = r'''.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*''' for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: snake_case_ : Optional[int] = key.replace(_a , _a ) if re.match(_a , _a ): snake_case_ : Union[str, Any] = int(re.match(_a , _a ).group(2 ) ) if layer_nb == 0: snake_case_ : Optional[int] = key.replace('''layers.0''' , '''proj_in''' ) elif layer_nb == 1: snake_case_ : Union[str, Any] = key.replace('''layers.1''' , '''layers.0''' ) elif layer_nb == 2: snake_case_ : List[Any] = key.replace('''layers.2''' , '''proj_out''' ) snake_case_ : Optional[Any] = value snake_case_ : Tuple = model_state_dict[ '''prompt_encoder.shared_embedding.positional_embedding''' ] return model_state_dict def __lowercase ( _a , _a , _a , _a="ybelkada/segment-anything" ): snake_case_ : Optional[Any] = hf_hub_download(_a , f"checkpoints/{model_name}.pth" ) if "sam_vit_b" in model_name: snake_case_ : Tuple = SamConfig() elif "sam_vit_l" in model_name: snake_case_ : Optional[Any] = SamVisionConfig( hidden_size=1_024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , ) snake_case_ : Union[str, Any] = SamConfig( vision_config=_a , ) elif "sam_vit_h" in model_name: snake_case_ : Tuple = SamVisionConfig( hidden_size=1_280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , ) snake_case_ : List[str] = SamConfig( vision_config=_a , ) snake_case_ : Tuple = torch.load(_a , map_location='''cpu''' ) snake_case_ : Optional[Any] = replace_keys(_a ) snake_case_ : Any = SamImageProcessor() snake_case_ : Optional[Any] = SamProcessor(image_processor=_a ) snake_case_ : Tuple = SamModel(_a ) hf_model.load_state_dict(_a ) snake_case_ : Tuple = hf_model.to('''cuda''' ) snake_case_ : Union[str, Any] = '''https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png''' snake_case_ : Union[str, Any] = 
Image.open(requests.get(_a , stream=_a ).raw ).convert('''RGB''' ) snake_case_ : Tuple = [[[400, 650]]] snake_case_ : List[str] = [[1]] snake_case_ : Optional[int] = processor(images=np.array(_a ) , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): snake_case_ : Optional[Any] = hf_model(**_a ) snake_case_ : Any = output.iou_scores.squeeze() if model_name == "sam_vit_h_4b8939": assert scores[-1].item() == 0.579_8902_5115_9668 snake_case_ : Optional[Any] = processor( images=np.array(_a ) , input_points=_a , input_labels=_a , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): snake_case_ : Optional[Any] = hf_model(**_a ) snake_case_ : Tuple = output.iou_scores.squeeze() assert scores[-1].item() == 0.9712_6030_9219_3604 snake_case_ : Tuple = ((75, 275, 1_725, 850),) snake_case_ : Optional[Any] = processor(images=np.array(_a ) , input_boxes=_a , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): snake_case_ : Dict = hf_model(**_a ) snake_case_ : Any = output.iou_scores.squeeze() assert scores[-1].item() == 0.8686_0156_0592_6514 # Test with 2 points and 1 image. snake_case_ : Union[str, Any] = [[[400, 650], [800, 650]]] snake_case_ : Optional[int] = [[1, 1]] snake_case_ : Tuple = processor( images=np.array(_a ) , input_points=_a , input_labels=_a , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): snake_case_ : Dict = hf_model(**_a ) snake_case_ : Dict = output.iou_scores.squeeze() assert scores[-1].item() == 0.9936_0477_9243_4692 if __name__ == "__main__": lowercase__ : Optional[int] = argparse.ArgumentParser() lowercase__ : Any = ['''sam_vit_b_01ec64''', '''sam_vit_h_4b8939''', '''sam_vit_l_0b3195'''] parser.add_argument( '''--model_name''', default='''sam_vit_h_4b8939''', choices=choices, type=str, help='''Path to hf config.json of model to convert''', ) parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub after converting''', ) parser.add_argument( '''--model_hub_id''', default='''ybelkada/segment-anything''', choices=choices, type=str, help='''Path to hf config.json of model to convert''', ) lowercase__ : Tuple = parser.parse_args() convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
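The key renaming above relies on a regex to pull block and layer indices out of hypernetwork-MLP keys. Isolated below with a made-up key; the pattern is copied from the script, while the key itself is illustrative.

import re

pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
key = "mask_decoder.output_hypernetworks_mlps.3.layers.1.weight"
match = re.match(pattern, key)
print(match.group(1), match.group(2))  # '3' '1' -> block and layer numbers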
485
1
import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class lowercase_ ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self : List[str] ) ->Tuple: """simple docstring""" a = inspect.getfile(accelerate.test_utils ) a = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] ) a = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] ) a = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] ) @require_multi_gpu def __lowerCAmelCase ( self : int ) ->List[Any]: """simple docstring""" print(F"""Found {torch.cuda.device_count()} devices.""" ) a = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() ) @require_multi_gpu def __lowerCAmelCase ( self : int ) ->Tuple: """simple docstring""" print(F"""Found {torch.cuda.device_count()} devices.""" ) a = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path] print(F"""Command: {cmd}""" ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() ) @require_multi_gpu def __lowerCAmelCase ( self : int ) ->Any: """simple docstring""" a = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() ) @require_multi_gpu def __lowerCAmelCase ( self : Tuple ) ->Dict: """simple docstring""" print(F"""Found {torch.cuda.device_count()} devices, using 2 devices only""" ) a = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices='''0,1''' ): execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() ) if __name__ == "__main__": UpperCAmelCase__ = Accelerator() UpperCAmelCase__ = (accelerator.state.process_index + 2, 10) UpperCAmelCase__ = torch.randint(0, 10, shape).to(accelerator.device) UpperCAmelCase__ = "" UpperCAmelCase__ = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." UpperCAmelCase__ = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." UpperCAmelCase__ = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
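What `pad_across_processes`, exercised at the bottom of the file above, does conceptually, shown on plain tensors without any distributed setup. This is an illustrative sketch, not the accelerate implementation: each rank's tensor is zero-padded to the longest first dimension across ranks.

import torch

rank_tensors = [torch.ones(2, 10), torch.ones(3, 10)]  # one tensor per process
max_len = max(t.shape[0] for t in rank_tensors)
padded = [
    torch.cat([t, t.new_zeros(max_len - t.shape[0], t.shape[1])])
    for t in rank_tensors
]
print([tuple(p.shape) for p in padded])  # [(3, 10), (3, 10)]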
117
from __future__ import annotations

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        # Manhattan distance to the goal.
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open nodes are sorted using __lt__, i.e. by heuristic cost.
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # Retrieve the best current path.
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print("------")
    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
        for elem in grid:
            print(elem)
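The heuristic driving the search above, computed by hand for one cell: Manhattan distance from (y=4, x=3) to the goal at (y=6, x=6). Greedy best-first always expands the open node with the smallest such value.

dx = abs(3 - 6)  # column difference
dy = abs(4 - 6)  # row difference
print(dx + dy)   # 5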
117
1
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() _UpperCamelCase = logging.get_logger(__name__) def _lowercase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ): __lowerCAmelCase : List[str] = original_name.split('''.''' )[0] __lowerCAmelCase : Dict = key.split('''.''' ) __lowerCAmelCase : Any = int(key_list[key_list.index(lowercase__ ) - 2] ) __lowerCAmelCase : Any = int(key_list[key_list.index(lowercase__ ) - 1] ) __lowerCAmelCase : List[Any] = orig_block_num - offset __lowerCAmelCase : int = key.replace(f"""{orig_block_num}.{layer_num}.{original_name}""" , f"""block.{new_block_num}.{layer_num}.{new_name}""" ) return key def _lowercase ( lowercase__ ): __lowerCAmelCase : Optional[Any] = OrderedDict() __lowerCAmelCase, __lowerCAmelCase : Optional[int] = 0, 0 for key, value in state_dict.items(): if key.startswith('''network''' ): __lowerCAmelCase : Optional[Any] = key.replace('''network''' , '''poolformer.encoder''' ) if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith('''bias''' ) and "patch_embed" not in key: patch_emb_offset += 1 __lowerCAmelCase : str = key[: key.find('''proj''' )] __lowerCAmelCase : Optional[Any] = key.replace(lowercase__ , f"""patch_embeddings.{total_embed_found}.""" ) __lowerCAmelCase : Optional[Any] = key.replace('''proj''' , '''projection''' ) if key.endswith('''bias''' ): total_embed_found += 1 if "patch_embeddings" in key: __lowerCAmelCase : List[Any] = '''poolformer.encoder.''' + key if "mlp.fc1" in key: __lowerCAmelCase : Any = replace_key_with_offset(lowercase__ , lowercase__ , '''mlp.fc1''' , '''output.conv1''' ) if "mlp.fc2" in key: __lowerCAmelCase : int = replace_key_with_offset(lowercase__ , lowercase__ , '''mlp.fc2''' , '''output.conv2''' ) if "norm1" in key: __lowerCAmelCase : Optional[Any] = replace_key_with_offset(lowercase__ , lowercase__ , '''norm1''' , '''before_norm''' ) if "norm2" in key: __lowerCAmelCase : Optional[Any] = replace_key_with_offset(lowercase__ , lowercase__ , '''norm2''' , '''after_norm''' ) if "layer_scale_1" in key: __lowerCAmelCase : Any = replace_key_with_offset(lowercase__ , lowercase__ , '''layer_scale_1''' , '''layer_scale_1''' ) if "layer_scale_2" in key: __lowerCAmelCase : Optional[Any] = replace_key_with_offset(lowercase__ , lowercase__ , '''layer_scale_2''' , '''layer_scale_2''' ) if "head" in key: __lowerCAmelCase : List[Any] = key.replace('''head''' , '''classifier''' ) __lowerCAmelCase : Dict = value return new_state_dict def _lowercase ( ): __lowerCAmelCase : Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg''' __lowerCAmelCase : Tuple = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ) return image @torch.no_grad() def _lowercase ( lowercase__ , lowercase__ , lowercase__ ): __lowerCAmelCase : str = PoolFormerConfig() # set attributes based on model_name __lowerCAmelCase : List[Any] = '''huggingface/label-files''' __lowerCAmelCase : Union[str, Any] = model_name[-3:] __lowerCAmelCase : Dict = 1_0_0_0 __lowerCAmelCase : List[str] = '''imagenet-1k-id2label.json''' __lowerCAmelCase : List[str] = (1, 1_0_0_0) # set config attributes __lowerCAmelCase : str = json.load(open(hf_hub_download(lowercase__ , lowercase__ , 
repo_type='''dataset''' ) , '''r''' ) ) __lowerCAmelCase : Tuple = {int(lowercase__ ): v for k, v in idalabel.items()} __lowerCAmelCase : Union[str, Any] = idalabel __lowerCAmelCase : str = {v: k for k, v in idalabel.items()} if size == "s12": __lowerCAmelCase : int = [2, 2, 6, 2] __lowerCAmelCase : Any = [6_4, 1_2_8, 3_2_0, 5_1_2] __lowerCAmelCase : Union[str, Any] = 4.0 __lowerCAmelCase : str = 0.9 elif size == "s24": __lowerCAmelCase : List[str] = [4, 4, 1_2, 4] __lowerCAmelCase : Optional[Any] = [6_4, 1_2_8, 3_2_0, 5_1_2] __lowerCAmelCase : Optional[int] = 4.0 __lowerCAmelCase : Optional[int] = 0.9 elif size == "s36": __lowerCAmelCase : Optional[int] = [6, 6, 1_8, 6] __lowerCAmelCase : List[Any] = [6_4, 1_2_8, 3_2_0, 5_1_2] __lowerCAmelCase : str = 4.0 __lowerCAmelCase : str = 1E-6 __lowerCAmelCase : Optional[Any] = 0.9 elif size == "m36": __lowerCAmelCase : Any = [6, 6, 1_8, 6] __lowerCAmelCase : Union[str, Any] = [9_6, 1_9_2, 3_8_4, 7_6_8] __lowerCAmelCase : Any = 4.0 __lowerCAmelCase : Any = 1E-6 __lowerCAmelCase : Any = 0.9_5 elif size == "m48": __lowerCAmelCase : Union[str, Any] = [8, 8, 2_4, 8] __lowerCAmelCase : Union[str, Any] = [9_6, 1_9_2, 3_8_4, 7_6_8] __lowerCAmelCase : int = 4.0 __lowerCAmelCase : int = 1E-6 __lowerCAmelCase : List[Any] = 0.9_5 else: raise ValueError(f"""Size {size} not supported""" ) # load image processor __lowerCAmelCase : List[str] = PoolFormerImageProcessor(crop_pct=lowercase__ ) # Prepare image __lowerCAmelCase : Tuple = prepare_img() __lowerCAmelCase : Tuple = image_processor(images=lowercase__ , return_tensors='''pt''' ).pixel_values logger.info(f"""Converting model {model_name}...""" ) # load original state dict __lowerCAmelCase : str = torch.load(lowercase__ , map_location=torch.device('''cpu''' ) ) # rename keys __lowerCAmelCase : List[Any] = rename_keys(lowercase__ ) # create HuggingFace model and load state dict __lowerCAmelCase : Any = PoolFormerForImageClassification(lowercase__ ) model.load_state_dict(lowercase__ ) model.eval() # Define image processor __lowerCAmelCase : Dict = PoolFormerImageProcessor(crop_pct=lowercase__ ) __lowerCAmelCase : Union[str, Any] = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values # forward pass __lowerCAmelCase : List[Any] = model(lowercase__ ) __lowerCAmelCase : Union[str, Any] = outputs.logits # define expected logit slices for different models if size == "s12": __lowerCAmelCase : Dict = torch.tensor([-0.3_0_4_5, -0.6_7_5_8, -0.4_8_6_9] ) elif size == "s24": __lowerCAmelCase : int = torch.tensor([0.4_4_0_2, -0.1_3_7_4, -0.8_0_4_5] ) elif size == "s36": __lowerCAmelCase : Optional[int] = torch.tensor([-0.6_0_8_0, -0.5_1_3_3, -0.5_8_9_8] ) elif size == "m36": __lowerCAmelCase : List[Any] = torch.tensor([0.3_9_5_2, 0.2_2_6_3, -1.2_6_6_8] ) elif size == "m48": __lowerCAmelCase : str = torch.tensor([0.1_1_6_7, -0.0_6_5_6, -0.3_4_2_3] ) else: raise ValueError(f"""Size {size} not supported""" ) # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3] , lowercase__ , atol=1E-2 ) # finally, save model and image processor logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(lowercase__ ).mkdir(exist_ok=lowercase__ ) model.save_pretrained(lowercase__ ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(lowercase__ ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() parser.add_argument( "--model_name", default="poolformer_s12", type=str, 
help="Name of the model you'd like to convert.", ) parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) _UpperCamelCase = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
583
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _UpperCamelCase = logging.get_logger(__name__) def _lowercase ( lowercase__ ): __lowerCAmelCase : Optional[int] = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: __lowerCAmelCase : Any = [1_4_4, 1_9_2, 2_4_0] __lowerCAmelCase : Dict = [1_6, 3_2, 6_4, 9_6, 1_2_8, 1_6_0, 6_4_0] elif "mobilevit_xs" in mobilevit_name: __lowerCAmelCase : int = [9_6, 1_2_0, 1_4_4] __lowerCAmelCase : str = [1_6, 3_2, 4_8, 6_4, 8_0, 9_6, 3_8_4] elif "mobilevit_xxs" in mobilevit_name: __lowerCAmelCase : List[str] = [6_4, 8_0, 9_6] __lowerCAmelCase : List[Any] = [1_6, 1_6, 2_4, 4_8, 6_4, 8_0, 3_2_0] __lowerCAmelCase : Tuple = 0.0_5 __lowerCAmelCase : List[str] = 2.0 if mobilevit_name.startswith('''deeplabv3_''' ): __lowerCAmelCase : int = 5_1_2 __lowerCAmelCase : Dict = 1_6 __lowerCAmelCase : Union[str, Any] = 2_1 __lowerCAmelCase : List[Any] = '''pascal-voc-id2label.json''' else: __lowerCAmelCase : List[str] = 1_0_0_0 __lowerCAmelCase : Any = '''imagenet-1k-id2label.json''' __lowerCAmelCase : Union[str, Any] = '''huggingface/label-files''' __lowerCAmelCase : Union[str, Any] = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type='''dataset''' ) , '''r''' ) ) __lowerCAmelCase : Union[str, Any] = {int(lowercase__ ): v for k, v in idalabel.items()} __lowerCAmelCase : Optional[int] = idalabel __lowerCAmelCase : str = {v: k for k, v in idalabel.items()} return config def _lowercase ( lowercase__ , lowercase__=False ): for i in range(1 , 6 ): if f"""layer_{i}.""" in name: __lowerCAmelCase : Dict = name.replace(f"""layer_{i}.""" , f"""encoder.layer.{i - 1}.""" ) if "conv_1." in name: __lowerCAmelCase : int = name.replace('''conv_1.''' , '''conv_stem.''' ) if ".block." in name: __lowerCAmelCase : Dict = name.replace('''.block.''' , '''.''' ) if "exp_1x1" in name: __lowerCAmelCase : Optional[Any] = name.replace('''exp_1x1''' , '''expand_1x1''' ) if "red_1x1" in name: __lowerCAmelCase : int = name.replace('''red_1x1''' , '''reduce_1x1''' ) if ".local_rep.conv_3x3." in name: __lowerCAmelCase : int = name.replace('''.local_rep.conv_3x3.''' , '''.conv_kxk.''' ) if ".local_rep.conv_1x1." in name: __lowerCAmelCase : Optional[Any] = name.replace('''.local_rep.conv_1x1.''' , '''.conv_1x1.''' ) if ".norm." in name: __lowerCAmelCase : List[str] = name.replace('''.norm.''' , '''.normalization.''' ) if ".conv." in name: __lowerCAmelCase : List[Any] = name.replace('''.conv.''' , '''.convolution.''' ) if ".conv_proj." 
in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")
    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name


def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            # split the fused qkv matrix into separate query/key/value projections
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(checkpoint, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--mobilevit_name",
        default="mobilevit_s",
        type=str,
        help=(
            "Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
            " 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
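# Illustrative CLI invocation for the conversion function above; the script
# filename and the checkpoint path are assumptions, not taken from this file.
#
#   python convert_mlcvnets_to_pytorch.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit_s_hf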
import logging import os from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union from filelock import FileLock from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available lowercase__ : Optional[Any] = logging.getLogger(__name__) @dataclass class UpperCAmelCase : '''simple docstring''' lowerCAmelCase_ = 42 lowerCAmelCase_ = 42 lowerCAmelCase_ = 42 @dataclass class UpperCAmelCase : '''simple docstring''' lowerCAmelCase_ = 42 lowerCAmelCase_ = 42 lowerCAmelCase_ = None lowerCAmelCase_ = None class UpperCAmelCase ( __a ): '''simple docstring''' lowerCAmelCase_ = '''train''' lowerCAmelCase_ = '''dev''' lowerCAmelCase_ = '''test''' class UpperCAmelCase : '''simple docstring''' @staticmethod def snake_case__ ( __lowercase : int , __lowercase : Union[Split, str] ): """simple docstring""" raise NotImplementedError @staticmethod def snake_case__ ( __lowercase : str ): """simple docstring""" raise NotImplementedError @staticmethod def snake_case__ ( __lowercase : List[InputExample] , __lowercase : List[str] , __lowercase : int , __lowercase : PreTrainedTokenizer , __lowercase : Optional[int]=False , __lowercase : str="[CLS]" , __lowercase : Optional[Any]=1 , __lowercase : Dict="[SEP]" , __lowercase : Optional[Any]=False , __lowercase : List[str]=False , __lowercase : str=0 , __lowercase : List[str]=0 , __lowercase : int=-1_00 , __lowercase : Any=0 , __lowercase : Tuple=True , ): """simple docstring""" snake_case_ = {label: i for i, label in enumerate(__lowerCAmelCase )} snake_case_ = [] for ex_index, example in enumerate(__lowerCAmelCase ): if ex_index % 1_00_00 == 0: logger.info("Writing example %d of %d" , __lowerCAmelCase , len(__lowerCAmelCase ) ) snake_case_ = [] snake_case_ = [] for word, label in zip(example.words , example.labels ): snake_case_ = tokenizer.tokenize(__lowerCAmelCase ) # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space. if len(__lowerCAmelCase ) > 0: tokens.extend(__lowerCAmelCase ) # Use the real label id for the first token of the word, and padding ids for the remaining tokens label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(__lowerCAmelCase ) - 1) ) # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa. snake_case_ = tokenizer.num_special_tokens_to_add() if len(__lowerCAmelCase ) > max_seq_length - special_tokens_count: snake_case_ = tokens[: (max_seq_length - special_tokens_count)] snake_case_ = label_ids[: (max_seq_length - special_tokens_count)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. 
tokens += [sep_token] label_ids += [pad_token_label_id] if sep_token_extra: # roberta uses an extra separator b/w pairs of sentences tokens += [sep_token] label_ids += [pad_token_label_id] snake_case_ = [sequence_a_segment_id] * len(__lowerCAmelCase ) if cls_token_at_end: tokens += [cls_token] label_ids += [pad_token_label_id] segment_ids += [cls_token_segment_id] else: snake_case_ = [cls_token] + tokens snake_case_ = [pad_token_label_id] + label_ids snake_case_ = [cls_token_segment_id] + segment_ids snake_case_ = tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. snake_case_ = [1 if mask_padding_with_zero else 0] * len(__lowerCAmelCase ) # Zero-pad up to the sequence length. snake_case_ = max_seq_length - len(__lowerCAmelCase ) if pad_on_left: snake_case_ = ([pad_token] * padding_length) + input_ids snake_case_ = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask snake_case_ = ([pad_token_segment_id] * padding_length) + segment_ids snake_case_ = ([pad_token_label_id] * padding_length) + label_ids else: input_ids += [pad_token] * padding_length input_mask += [0 if mask_padding_with_zero else 1] * padding_length segment_ids += [pad_token_segment_id] * padding_length label_ids += [pad_token_label_id] * padding_length assert len(__lowerCAmelCase ) == max_seq_length assert len(__lowerCAmelCase ) == max_seq_length assert len(__lowerCAmelCase ) == max_seq_length assert len(__lowerCAmelCase ) == max_seq_length if ex_index < 5: logger.info("*** Example ***" ) logger.info("guid: %s" , example.guid ) logger.info("tokens: %s" , " ".join([str(__lowerCAmelCase ) for x in tokens] ) ) logger.info("input_ids: %s" , " ".join([str(__lowerCAmelCase ) for x in input_ids] ) ) logger.info("input_mask: %s" , " ".join([str(__lowerCAmelCase ) for x in input_mask] ) ) logger.info("segment_ids: %s" , " ".join([str(__lowerCAmelCase ) for x in segment_ids] ) ) logger.info("label_ids: %s" , " ".join([str(__lowerCAmelCase ) for x in label_ids] ) ) if "token_type_ids" not in tokenizer.model_input_names: snake_case_ = None features.append( InputFeatures( input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , label_ids=__lowerCAmelCase ) ) return features if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset class UpperCAmelCase ( __a ): '''simple docstring''' lowerCAmelCase_ = 42 lowerCAmelCase_ = nn.CrossEntropyLoss().ignore_index def __init__( self : str , __lowercase : TokenClassificationTask , __lowercase : str , __lowercase : PreTrainedTokenizer , __lowercase : List[str] , __lowercase : str , __lowercase : Optional[int] = None , __lowercase : Optional[Any]=False , __lowercase : Split = Split.train , ): """simple docstring""" snake_case_ = os.path.join( __lowerCAmelCase , "cached_{}_{}_{}".format(mode.value , tokenizer.__class__.__name__ , str(__lowerCAmelCase ) ) , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
snake_case_ = cached_features_file + ".lock" with FileLock(__lowerCAmelCase ): if os.path.exists(__lowerCAmelCase ) and not overwrite_cache: logger.info(f"Loading features from cached file {cached_features_file}" ) snake_case_ = torch.load(__lowerCAmelCase ) else: logger.info(f"Creating features from dataset file at {data_dir}" ) snake_case_ = token_classification_task.read_examples_from_file(__lowerCAmelCase , __lowerCAmelCase ) # TODO clean up all this to leverage built-in features of tokenizers snake_case_ = token_classification_task.convert_examples_to_features( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , cls_token_at_end=bool(model_type in ["xlnet"] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["xlnet"] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=__lowerCAmelCase , pad_on_left=bool(tokenizer.padding_side == "left" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) logger.info(f"Saving features into cached file {cached_features_file}" ) torch.save(self.features , __lowerCAmelCase ) def __len__( self : Optional[Any] ): """simple docstring""" return len(self.features ) def __getitem__( self : Union[str, Any] , __lowercase : List[str] ): """simple docstring""" return self.features[i] if is_tf_available(): import tensorflow as tf class UpperCAmelCase : '''simple docstring''' lowerCAmelCase_ = 42 lowerCAmelCase_ = -100 def __init__( self : Any , __lowercase : TokenClassificationTask , __lowercase : str , __lowercase : PreTrainedTokenizer , __lowercase : List[str] , __lowercase : str , __lowercase : Optional[int] = None , __lowercase : Tuple=False , __lowercase : Split = Split.train , ): """simple docstring""" snake_case_ = token_classification_task.read_examples_from_file(__lowerCAmelCase , __lowerCAmelCase ) # TODO clean up all this to leverage built-in features of tokenizers snake_case_ = token_classification_task.convert_examples_to_features( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , cls_token_at_end=bool(model_type in ["xlnet"] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["xlnet"] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=__lowerCAmelCase , pad_on_left=bool(tokenizer.padding_side == "left" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) def gen(): for ex in self.features: if ex.token_type_ids is None: yield ( {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label_ids, ) else: yield ( { "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label_ids, ) if "token_type_ids" not in tokenizer.model_input_names: snake_case_ = tf.data.Dataset.from_generator( __lowerCAmelCase , ({"input_ids": tf.intaa, "attention_mask": tf.intaa}, tf.intaa) , ( {"input_ids": tf.TensorShape([None] ), "attention_mask": tf.TensorShape([None] )}, tf.TensorShape([None] ), ) , ) else: snake_case_ = tf.data.Dataset.from_generator( __lowerCAmelCase , ({"input_ids": tf.intaa, "attention_mask": tf.intaa, "token_type_ids": tf.intaa}, tf.intaa) , ( { "input_ids": tf.TensorShape([None] ), "attention_mask": tf.TensorShape([None] ), "token_type_ids": tf.TensorShape([None] ), }, tf.TensorShape([None] ), ) , ) def snake_case__ ( self : List[str] ): """simple docstring""" snake_case_ = 
self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) ) return self.dataset def __len__( self : List[Any] ): """simple docstring""" return len(self.features ) def __getitem__( self : Optional[int] , __lowercase : int ): """simple docstring""" return self.features[i]
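# A minimal, self-contained sketch of the right-padding branch in
# convert_examples_to_features above: real tokens get attention mask 1, padding
# tokens get 0, and label ids are padded with the loss-ignore index (-100).
# `pad_example` is a hypothetical helper written here only for illustration.
def pad_example(input_ids, label_ids, max_seq_length=8, pad_token=0, pad_token_label_id=-100):
    padding_length = max_seq_length - len(input_ids)
    attention_mask = [1] * len(input_ids) + [0] * padding_length
    return (
        input_ids + [pad_token] * padding_length,
        attention_mask,
        label_ids + [pad_token_label_id] * padding_length,
    )


assert pad_example([101, 7592, 102], [-100, 5, -100])[1] == [1, 1, 1, 0, 0, 0, 0, 0]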
import warnings
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class FlavaProcessor(ProcessorMixin):
    """Constructs a FLAVA processor which wraps an image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
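# Hedged usage sketch for the processor above; the Hub checkpoint name
# "facebook/flava-full" and the local image path are assumptions.
#
# from PIL import Image
# from transformers import FlavaProcessor
#
# processor = FlavaProcessor.from_pretrained("facebook/flava-full")
# inputs = processor(images=Image.open("cat.jpg"), text=["a photo of a cat"], return_tensors="pt")
# print(sorted(inputs.keys()))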
import os import tempfile import unittest from transformers import NezhaConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, ) from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST class __A: """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=1_28 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , ): UpperCamelCase__ = parent UpperCamelCase__ = batch_size UpperCamelCase__ = seq_length UpperCamelCase__ = is_training UpperCamelCase__ = use_input_mask UpperCamelCase__ = use_token_type_ids UpperCamelCase__ = use_labels UpperCamelCase__ = vocab_size UpperCamelCase__ = hidden_size UpperCamelCase__ = num_hidden_layers UpperCamelCase__ = num_attention_heads UpperCamelCase__ = intermediate_size UpperCamelCase__ = hidden_act UpperCamelCase__ = hidden_dropout_prob UpperCamelCase__ = attention_probs_dropout_prob UpperCamelCase__ = max_position_embeddings UpperCamelCase__ = type_vocab_size UpperCamelCase__ = type_sequence_label_size UpperCamelCase__ = initializer_range UpperCamelCase__ = num_labels UpperCamelCase__ = num_choices UpperCamelCase__ = scope def UpperCAmelCase_ (self ): UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase__ = None if self.use_input_mask: UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase__ = None if self.use_token_type_ids: UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase__ = None UpperCamelCase__ = None UpperCamelCase__ = None if self.use_labels: UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase_ (self ): return NezhaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , ) def UpperCAmelCase_ (self ): ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) = self.prepare_config_and_inputs() UpperCamelCase__ = True UpperCamelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = NezhaModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ): UpperCamelCase__ = True UpperCamelCase__ = NezhaModel(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , encoder_attention_mask=SCREAMING_SNAKE_CASE_ , ) UpperCamelCase__ = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , ) UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = NezhaForMaskedLM(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = NezhaForNextSentencePrediction(config=SCREAMING_SNAKE_CASE_ ) 
model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = NezhaForPreTraining(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , next_sentence_label=SCREAMING_SNAKE_CASE_ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = NezhaForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = self.num_labels UpperCamelCase__ = NezhaForSequenceClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = self.num_labels UpperCamelCase__ = NezhaForTokenClassification(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = self.num_choices UpperCamelCase__ = NezhaForMultipleChoice(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase__ = 
model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.prepare_config_and_inputs() ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) = config_and_inputs UpperCamelCase__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class __A( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ( ( NezhaModel, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ = ( { """feature-extraction""": NezhaModel, """fill-mask""": NezhaForMaskedLM, """question-answering""": NezhaForQuestionAnswering, """text-classification""": NezhaForSequenceClassification, """token-classification""": NezhaForTokenClassification, """zero-shot""": NezhaForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ = True def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ): UpperCamelCase__ = super()._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ ) if return_labels: if model_class in get_values(SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ ) return inputs_dict def UpperCAmelCase_ (self ): UpperCamelCase__ = NezhaModelTester(self ) UpperCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 ) def UpperCAmelCase_ (self ): self.config_tester.run_common_tests() def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): # This regression test was failing with PyTorch < 1.3 ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() UpperCamelCase__ = None self.model_tester.create_and_check_model_as_decoder( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE_ ) @slow def UpperCAmelCase_ (self ): for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase__ = NezhaModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) @slow @require_torch_gpu def UpperCAmelCase_ (self ): UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # NezhaForMultipleChoice behaves incorrectly in JIT environments. if model_class == NezhaForMultipleChoice: return UpperCamelCase__ = True UpperCamelCase__ = model_class(config=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = torch.jit.trace( SCREAMING_SNAKE_CASE_ , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , """bert.pt""" ) ) UpperCamelCase__ = torch.jit.load(os.path.join(SCREAMING_SNAKE_CASE_ , """bert.pt""" ) , map_location=SCREAMING_SNAKE_CASE_ ) loaded(inputs_dict["""input_ids"""].to(SCREAMING_SNAKE_CASE_ ) , inputs_dict["""attention_mask"""].to(SCREAMING_SNAKE_CASE_ ) ) @require_torch class __A( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase_ (self ): UpperCamelCase__ = NezhaModel.from_pretrained("""sijunhe/nezha-cn-base""" ) UpperCamelCase__ = torch.tensor([[0, 1, 2, 3, 4, 5]] ) UpperCamelCase__ = torch.tensor([[0, 1, 1, 1, 1, 1]] ) with torch.no_grad(): UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )[0] UpperCamelCase__ = torch.Size((1, 6, 7_68) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) ) @slow def UpperCAmelCase_ (self ): UpperCamelCase__ = NezhaForMaskedLM.from_pretrained("""sijunhe/nezha-cn-base""" ) UpperCamelCase__ = torch.tensor([[0, 1, 2, 3, 4, 5]] ) UpperCamelCase__ = torch.tensor([[1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )[0] UpperCamelCase__ = torch.Size((1, 6, 2_11_28) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = torch.tensor( [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , 
SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
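# The slow integration test above boils down to the following sketch (kept as
# comments because it downloads "sijunhe/nezha-cn-base" from the Hub):
#
# import torch
# from transformers import NezhaModel
#
# model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
# input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
# attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
# with torch.no_grad():
#     output = model(input_ids, attention_mask=attention_mask)[0]
# print(output.shape)  # torch.Size([1, 6, 768])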
import math from typing import Callable, List, Optional, Union import numpy as np import PIL import torch from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler def __magic_name__ ( __a : int , __a : List[str] , __a : str=[] ): '''simple docstring''' UpperCamelCase__ = size[0] - overlap_pixels * 2 UpperCamelCase__ = size[1] - overlap_pixels * 2 for letter in ["l", "r"]: if letter in remove_borders: size_x += overlap_pixels for letter in ["t", "b"]: if letter in remove_borders: size_y += overlap_pixels UpperCamelCase__ = np.ones((size_y, size_x) , dtype=np.uinta ) * 255 UpperCamelCase__ = np.pad(__a , mode="""linear_ramp""" , pad_width=__a , end_values=0 ) if "l" in remove_borders: UpperCamelCase__ = mask[:, overlap_pixels : mask.shape[1]] if "r" in remove_borders: UpperCamelCase__ = mask[:, 0 : mask.shape[1] - overlap_pixels] if "t" in remove_borders: UpperCamelCase__ = mask[overlap_pixels : mask.shape[0], :] if "b" in remove_borders: UpperCamelCase__ = mask[0 : mask.shape[0] - overlap_pixels, :] return mask def __magic_name__ ( __a : int , __a : Dict , __a : Optional[int] ): '''simple docstring''' return max(__a , min(__a , __a ) ) def __magic_name__ ( __a : [int] , __a : [int] , __a : [int] ): '''simple docstring''' return ( clamp(rect[0] , min[0] , max[0] ), clamp(rect[1] , min[1] , max[1] ), clamp(rect[2] , min[0] , max[0] ), clamp(rect[3] , min[1] , max[1] ), ) def __magic_name__ ( __a : [int] , __a : int , __a : [int] ): '''simple docstring''' UpperCamelCase__ = list(__a ) rect[0] -= overlap rect[1] -= overlap rect[2] += overlap rect[3] += overlap UpperCamelCase__ = clamp_rect(__a , [0, 0] , [image_size[0], image_size[1]] ) return rect def __magic_name__ ( __a : Optional[int] , __a : Tuple , __a : str , __a : List[Any] ): '''simple docstring''' UpperCamelCase__ = Image.new("""RGB""" , (tile.size[0] + original_slice, tile.size[1]) ) result.paste( original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop( (slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , ) result.paste(__a , (original_slice, 0) ) return result def __magic_name__ ( __a : int , __a : int ): '''simple docstring''' UpperCamelCase__ = (original_image_slice * 4, 0, tile.size[0], tile.size[1]) UpperCamelCase__ = tile.crop(__a ) return tile def __magic_name__ ( __a : List[str] , __a : Any ): '''simple docstring''' UpperCamelCase__ = n % d return n - divisor class __A( __lowerCamelCase ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 3_50 , ): super().__init__( vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , low_res_scheduler=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , max_noise_level=SCREAMING_SNAKE_CASE_ , ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): torch.manual_seed(0 ) UpperCamelCase__ = ( min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ), 
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ), min(image.size[0] , (x + 1) * tile_size ), min(image.size[1] , (y + 1) * tile_size ), ) UpperCamelCase__ = add_overlap_rect(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , image.size ) UpperCamelCase__ = image.crop(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0] UpperCamelCase__ = translated_slice_x - (original_image_slice / 2) UpperCamelCase__ = max(0 , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = squeeze_tile(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = to_input.size UpperCamelCase__ = to_input.resize((tile_size, tile_size) , Image.BICUBIC ) UpperCamelCase__ = super(SCREAMING_SNAKE_CASE_ , self ).__call__(image=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).images[0] UpperCamelCase__ = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC ) UpperCamelCase__ = unsqueeze_tile(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC ) UpperCamelCase__ = [] if x == 0: remove_borders.append("""l""" ) elif crop_rect[2] == image.size[0]: remove_borders.append("""r""" ) if y == 0: remove_borders.append("""t""" ) elif crop_rect[3] == image.size[1]: remove_borders.append("""b""" ) UpperCamelCase__ = Image.fromarray( make_transparency_mask( (upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=SCREAMING_SNAKE_CASE_ ) , mode="""L""" , ) final_image.paste( SCREAMING_SNAKE_CASE_ , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , SCREAMING_SNAKE_CASE_ ) @torch.no_grad() def __call__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 75 , SCREAMING_SNAKE_CASE_ = 9.0 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 1_28 , SCREAMING_SNAKE_CASE_ = 32 , SCREAMING_SNAKE_CASE_ = 32 , ): UpperCamelCase__ = Image.new("""RGB""" , (image.size[0] * 4, image.size[1] * 4) ) UpperCamelCase__ = math.ceil(image.size[0] / tile_size ) UpperCamelCase__ = math.ceil(image.size[1] / tile_size ) UpperCamelCase__ = tcx * tcy UpperCamelCase__ = 0 for y in range(SCREAMING_SNAKE_CASE_ ): for x in range(SCREAMING_SNAKE_CASE_ ): self._process_tile( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prompt=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , noise_level=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , ) current_count += 1 if callback is not None: callback({"""progress""": current_count / total_tile_count, """image""": final_image} ) return final_image def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = """stabilityai/stable-diffusion-x4-upscaler""" UpperCamelCase__ = StableDiffusionTiledUpscalePipeline.from_pretrained(__a , revision="""fp16""" , torch_dtype=torch.floataa ) UpperCamelCase__ = pipe.to("""cuda""" ) UpperCamelCase__ = 
Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(
        image=UpperCamelCase__, prompt="Black font, white background, vector", noise_level=40, callback=callback
    )
    final_image.save("diffusers_library.jpg")


if __name__ == "__main__":
    __magic_name__()
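# Self-contained sketch of the tile-grid arithmetic used in __call__ above: the
# input image is covered by ceil(width / tile_size) x ceil(height / tile_size) tiles.
import math


def tile_grid(width: int, height: int, tile_size: int = 128) -> tuple:
    tcx = math.ceil(width / tile_size)
    tcy = math.ceil(height / tile_size)
    return tcx, tcy, tcx * tcy


assert tile_grid(512, 320) == (4, 3, 12)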
def power(base: int, exponent: int) -> float:
    """Raise base to the power of exponent using recursion."""
    return base * power(base, exponent - 1) if exponent else 1


if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"{base} to the power of {exponent} is {result}")
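# Example, assuming the power() function above: power(3, 4) expands to
# 3 * power(3, 3) * ... until the exponent reaches 0, which terminates the
# recursion with 1.
assert power(3, 4) == 81
assert power(5, 0) == 1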
"""simple docstring""" from .configuration_bert_masked import MaskedBertConfig from .modeling_bert_masked import ( MaskedBertForMultipleChoice, MaskedBertForQuestionAnswering, MaskedBertForSequenceClassification, MaskedBertForTokenClassification, MaskedBertModel, ) from .modules import *
import argparse
import json

from tqdm import tqdm


def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
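# Illustrative invocation (the script filename is an assumption): one question
# per line goes to the evaluation set, and the tab-separated titles of its
# positive contexts go to the matching line of the gold file.
#
#   python parse_dpr_relevance_data.py \
#       --src_path biencoder-nq-dev.json \
#       --evaluation_set output/biencoder-nq-dev.questions \
#       --gold_data_path output/biencoder-nq-dev.pages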
from math import isqrt


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return the prime numbers below max_number, via a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count the semiprimes below max_number with a two-pointer sweep over the primes."""
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count


if __name__ == "__main__":
    print(f"{solution() = }")
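# Worked example for a small bound, assuming the solution() above: the
# semiprimes below 30 are 4, 6, 9, 10, 14, 15, 21, 22, 25, 26 -- ten in total.
assert solution(30) == 10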
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available __A = { "configuration_audio_spectrogram_transformer": [ "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ASTConfig", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "ASTForAudioClassification", "ASTModel", "ASTPreTrainedModel", ] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["ASTFeatureExtractor"] if TYPE_CHECKING: from .configuration_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ASTConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ASTForAudioClassification, ASTModel, ASTPreTrainedModel, ) try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
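# Usage sketch: thanks to the lazy module above, the heavy submodules are only
# imported on first attribute access, e.g.
#
# from transformers import ASTConfig, ASTModel
#
# config = ASTConfig()
# model = ASTModel(config)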
from datetime import datetime as dt
import os

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
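# A minimal sketch of the inactivity test applied above, using plain datetimes
# and no GitHub access; the helper name is hypothetical.
from datetime import datetime


def is_stale(updated_at: datetime, created_at: datetime, now: datetime) -> bool:
    return (now - updated_at).days > 23 and (now - created_at).days >= 30


assert is_stale(datetime(2023, 1, 1), datetime(2022, 12, 1), now=datetime(2023, 2, 1))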
"""simple docstring""" import itertools import random import unittest import numpy as np from transformers import is_speech_available from transformers.testing_utils import require_torch, require_torchaudio from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import SpeechaTextFeatureExtractor UpperCAmelCase__ = random.Random() def _UpperCAmelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int]=1.0 , __lowerCamelCase : List[str]=None , __lowerCamelCase : int=None ) -> int: if rng is None: _snake_case = global_rng _snake_case = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class lowerCAmelCase__ ( unittest.TestCase ): def __init__( self : str , _lowerCamelCase : List[Any] , _lowerCamelCase : Any=7 , _lowerCamelCase : List[Any]=400 , _lowerCamelCase : List[Any]=2000 , _lowerCamelCase : Optional[int]=24 , _lowerCamelCase : Dict=24 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : List[str]=16000 , _lowerCamelCase : Optional[Any]=True , _lowerCamelCase : str=True , ): _snake_case = parent _snake_case = batch_size _snake_case = min_seq_length _snake_case = max_seq_length _snake_case = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) _snake_case = feature_size _snake_case = num_mel_bins _snake_case = padding_value _snake_case = sampling_rate _snake_case = return_attention_mask _snake_case = do_normalize def lowercase ( self : Tuple ): return { "feature_size": self.feature_size, "num_mel_bins": self.num_mel_bins, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def lowercase ( self : Optional[int] , _lowerCamelCase : Optional[int]=False , _lowerCamelCase : List[Any]=False ): def _flatten(_lowerCamelCase : int ): return list(itertools.chain(*_lowerCamelCase ) ) if equal_length: _snake_case = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size _snake_case = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: _snake_case = [np.asarray(_lowerCamelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class lowerCAmelCase__ ( __A , unittest.TestCase ): __a = SpeechaTextFeatureExtractor if is_speech_available() else None def lowercase ( self : Optional[Any] ): _snake_case = SpeechaTextFeatureExtractionTester(self ) def lowercase ( self : Optional[Any] , _lowerCamelCase : Optional[Any] ): self.assertTrue(np.all(np.mean(_lowerCamelCase , axis=0 ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(_lowerCamelCase , axis=0 ) - 1 ) < 1e-3 ) ) def lowercase ( self : Tuple ): _snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 _snake_case = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _snake_case = [np.asarray(_lowerCamelCase ) for speech_input in speech_inputs] # Test feature size _snake_case = feature_extractor(_lowerCamelCase , padding=_lowerCamelCase , return_tensors='''np''' ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size ) # Test not batched input 
_snake_case = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features _snake_case = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features self.assertTrue(np.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 ) ) # Test batched _snake_case = feature_extractor(_lowerCamelCase , return_tensors='''np''' ).input_features _snake_case = feature_extractor(_lowerCamelCase , return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(_lowerCamelCase , _lowerCamelCase ): self.assertTrue(np.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. _snake_case = [floats_list((1, x) )[0] for x in (800, 800, 800)] _snake_case = np.asarray(_lowerCamelCase ) _snake_case = feature_extractor(_lowerCamelCase , return_tensors='''np''' ).input_features _snake_case = feature_extractor(_lowerCamelCase , return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(_lowerCamelCase , _lowerCamelCase ): self.assertTrue(np.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 ) ) def lowercase ( self : Optional[int] ): _snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _snake_case = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _snake_case = ['longest', 'max_length', 'do_not_pad'] _snake_case = [None, 16, None] for max_length, padding in zip(_lowerCamelCase , _lowerCamelCase ): _snake_case = feature_extractor( _lowerCamelCase , padding=_lowerCamelCase , max_length=_lowerCamelCase , return_attention_mask=_lowerCamelCase ) _snake_case = inputs.input_features _snake_case = inputs.attention_mask _snake_case = [np.sum(_lowerCamelCase ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def lowercase ( self : Any ): _snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _snake_case = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _snake_case = ['longest', 'max_length', 'do_not_pad'] _snake_case = [None, 16, None] for max_length, padding in zip(_lowerCamelCase , _lowerCamelCase ): _snake_case = feature_extractor( _lowerCamelCase , max_length=_lowerCamelCase , padding=_lowerCamelCase , return_tensors='''np''' , return_attention_mask=_lowerCamelCase ) _snake_case = inputs.input_features _snake_case = inputs.attention_mask _snake_case = [np.sum(_lowerCamelCase ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def lowercase ( self : Optional[Any] ): _snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _snake_case = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _snake_case = feature_extractor( _lowerCamelCase , padding='''max_length''' , max_length=4 , truncation=_lowerCamelCase , return_tensors='''np''' , return_attention_mask=_lowerCamelCase , ) _snake_case = inputs.input_features _snake_case = inputs.attention_mask _snake_case = np.sum(attention_mask == 1 , 
axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1] ) self._check_zero_mean_unit_variance(input_features[2] ) def lowercase ( self : int ): _snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _snake_case = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _snake_case = feature_extractor( _lowerCamelCase , padding='''longest''' , max_length=4 , truncation=_lowerCamelCase , return_tensors='''np''' , return_attention_mask=_lowerCamelCase , ) _snake_case = inputs.input_features _snake_case = inputs.attention_mask _snake_case = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 4, 24) ) _snake_case = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _snake_case = feature_extractor( _lowerCamelCase , padding='''longest''' , max_length=16 , truncation=_lowerCamelCase , return_tensors='''np''' , return_attention_mask=_lowerCamelCase , ) _snake_case = inputs.input_features _snake_case = inputs.attention_mask _snake_case = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 6, 24) ) def lowercase ( self : Dict ): import torch _snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _snake_case = np.random.rand(100 , 32 ).astype(np.floataa ) _snake_case = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: _snake_case = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) _snake_case = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def lowercase ( self : Union[str, Any] , _lowerCamelCase : List[str] ): from datasets import load_dataset _snake_case = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' ) # automatic decoding with librispeech _snake_case = ds.sort('''id''' ).select(range(_lowerCamelCase ) )[:num_samples]['audio'] return [x["array"] for x in speech_samples] def lowercase ( self : str ): _snake_case = np.array([ -1.5_7_4_5, -1.7_7_1_3, -1.7_0_2_0, -1.6_0_6_9, -1.2_2_5_0, -1.1_1_0_5, -0.9_0_7_2, -0.8_2_4_1, -1.2_3_1_0, -0.8_0_9_8, -0.3_3_2_0, -0.4_1_0_1, -0.7_9_8_5, -0.4_9_9_6, -0.8_2_1_3, -0.9_1_2_8, -1.0_4_2_0, -1.1_2_8_6, -1.0_4_4_0, -0.7_9_9_9, -0.8_4_0_5, -1.2_2_7_5, -1.5_4_4_3, -1.4_6_2_5, ] ) # fmt: on _snake_case = self._load_datasamples(1 ) _snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _snake_case = feature_extractor(_lowerCamelCase , return_tensors='''pt''' ).input_features self.assertEquals(input_features.shape , (1, 584, 24) ) self.assertTrue(np.allclose(input_features[0, 0, :30] , _lowerCamelCase , atol=1e-4 ) )
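# Hedged sketch of the feature extractor under test, applied to a synthetic
# one-second waveform (kept as comments because it needs torchaudio installed):
#
# import numpy as np
# from transformers import Speech2TextFeatureExtractor
#
# extractor = Speech2TextFeatureExtractor(feature_size=24, num_mel_bins=24, sampling_rate=16000)
# features = extractor(np.zeros(16000), sampling_rate=16000, return_tensors="np").input_features
# print(features.shape)  # (1, num_frames, 24)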
704
"""simple docstring""" from typing import Union import fire import torch from tqdm import tqdm def _UpperCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : str = "cpu" , __lowerCamelCase : Union[str, None] = None ) -> None: _snake_case = torch.load(__lowerCamelCase , map_location=__lowerCamelCase ) for k, v in tqdm(state_dict.items() ): if not isinstance(__lowerCamelCase , torch.Tensor ): raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''' ) _snake_case = v.half() if save_path is None: # overwrite src_path _snake_case = src_path torch.save(__lowerCamelCase , __lowerCamelCase ) if __name__ == "__main__": fire.Fire(convert)
430
0
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    # Placeholder that raises a helpful error when the `note_seq` backend is
    # missing; the class and method names follow the usual dummy-object
    # pattern and are a reconstruction of mangled identifiers.
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
577
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
306
0
def solution() -> int:
    """
    Project Euler #9: return the product a * b * c for the Pythagorean
    triplet (a, b, c) with a + b + c = 1000.
    """
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]


if __name__ == "__main__":
    print(f"{solution() = }")
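A quick hedged check: the unique triplet is (200, 375, 425), since 200**2 + 375**2 == 425**2 == 180625 and the sides sum to 1000.

assert solution() == 200 * 375 * 425 == 31875000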
10
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
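A brief hedged usage sketch; the defaults mirror the RoBERTa-base shape, and only construction is exercised here:

config = IBertConfig()
assert config.model_type == "ibert"
assert (config.hidden_size, config.num_hidden_layers) == (768, 12)
assert config.quant_mode is False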
10
1
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any):
        # insert the new node at the head of the list
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return

        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next

        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next

        if node_1 is None or node_2 is None:
            return

        # swap the payloads rather than relinking the nodes
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)

    ll.print_list()

    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
604
'''simple docstring''' import json import os import shutil import warnings from argparse import ArgumentParser, Namespace from pathlib import Path from typing import List from ..utils import logging from . import BaseTransformersCLICommand try: from cookiecutter.main import cookiecutter lowerCAmelCase = True except ImportError: lowerCAmelCase = False lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name def __A ( a_ : Namespace ): return AddNewModelCommand(args.testing ,args.testing_file ,path=args.path ) class lowerCamelCase ( _A ): @staticmethod def _lowerCamelCase ( a_ ): lowerCAmelCase : Optional[int] = parser.add_parser("add-new-model" ) add_new_model_parser.add_argument("--testing" , action="store_true" , help="If in testing mode." ) add_new_model_parser.add_argument("--testing_file" , type=a_ , help="Configuration file on which to run." ) add_new_model_parser.add_argument( "--path" , type=a_ , help="Path to cookiecutter. Should only be used for testing purposes." ) add_new_model_parser.set_defaults(func=a_ ) def __init__( self , a_ , a_ , a_=None , *a_ ): lowerCAmelCase : Any = testing lowerCAmelCase : str = testing_file lowerCAmelCase : Optional[int] = path def _lowerCamelCase ( self ): warnings.warn( "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. " "It is not actively maintained anymore, so might give a result that won't pass all tests and quality " "checks, you should use `transformers-cli add-new-model-like` instead." ) if not _has_cookiecutter: raise ImportError( "Model creation dependencies are required to use the `add_new_model` command. Install them by running " "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" ) # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory lowerCAmelCase : Optional[Any] = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]] if len(a_ ) > 0: raise ValueError( "Several directories starting with `cookiecutter-template-` in current working directory. " "Please clean your directory by removing all folders starting with `cookiecutter-template-` or " "change your working directory." 
) lowerCAmelCase : List[str] = ( Path(a_ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent ) lowerCAmelCase : str = path_to_transformer_root / "templates" / "adding_a_new_model" # Execute cookiecutter if not self._testing: cookiecutter(str(a_ ) ) else: with open(self._testing_file , "r" ) as configuration_file: lowerCAmelCase : Dict = json.load(a_ ) cookiecutter( str(path_to_cookiecutter if self._path is None else self._path ) , no_input=a_ , extra_context=a_ , ) lowerCAmelCase : Optional[Any] = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0] # Retrieve configuration with open(directory + "/configuration.json" , "r" ) as configuration_file: lowerCAmelCase : str = json.load(a_ ) lowerCAmelCase : List[str] = configuration["lowercase_modelname"] lowerCAmelCase : List[Any] = configuration["generate_tensorflow_pytorch_and_flax"] os.remove(F'''{directory}/configuration.json''' ) lowerCAmelCase : Optional[int] = "PyTorch" in generate_tensorflow_pytorch_and_flax lowerCAmelCase : Optional[int] = "TensorFlow" in generate_tensorflow_pytorch_and_flax lowerCAmelCase : Optional[Any] = "Flax" in generate_tensorflow_pytorch_and_flax lowerCAmelCase : List[str] = F'''{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}''' os.makedirs(a_ , exist_ok=a_ ) os.makedirs(F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}''' , exist_ok=a_ ) # Tests require submodules as they have parent imports with open(F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''' , "w" ): pass shutil.move( F'''{directory}/__init__.py''' , F'''{model_dir}/__init__.py''' , ) shutil.move( F'''{directory}/configuration_{lowercase_model_name}.py''' , F'''{model_dir}/configuration_{lowercase_model_name}.py''' , ) def remove_copy_lines(a_ ): with open(a_ , "r" ) as f: lowerCAmelCase : Dict = f.readlines() with open(a_ , "w" ) as f: for line in lines: if "# Copied from transformers." 
not in line: f.write(a_ ) if output_pytorch: if not self._testing: remove_copy_lines(F'''{directory}/modeling_{lowercase_model_name}.py''' ) shutil.move( F'''{directory}/modeling_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_{lowercase_model_name}.py''' , ) shutil.move( F'''{directory}/test_modeling_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''' , ) else: os.remove(F'''{directory}/modeling_{lowercase_model_name}.py''' ) os.remove(F'''{directory}/test_modeling_{lowercase_model_name}.py''' ) if output_tensorflow: if not self._testing: remove_copy_lines(F'''{directory}/modeling_tf_{lowercase_model_name}.py''' ) shutil.move( F'''{directory}/modeling_tf_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_tf_{lowercase_model_name}.py''' , ) shutil.move( F'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''' , ) else: os.remove(F'''{directory}/modeling_tf_{lowercase_model_name}.py''' ) os.remove(F'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' ) if output_flax: if not self._testing: remove_copy_lines(F'''{directory}/modeling_flax_{lowercase_model_name}.py''' ) shutil.move( F'''{directory}/modeling_flax_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , ) shutil.move( F'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , ) else: os.remove(F'''{directory}/modeling_flax_{lowercase_model_name}.py''' ) os.remove(F'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' ) shutil.move( F'''{directory}/{lowercase_model_name}.md''' , F'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , ) shutil.move( F'''{directory}/tokenization_{lowercase_model_name}.py''' , F'''{model_dir}/tokenization_{lowercase_model_name}.py''' , ) shutil.move( F'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , F'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , ) from os import fdopen, remove from shutil import copymode, move from tempfile import mkstemp def replace(a_ , a_ , a_ ): # Create temp file lowerCAmelCase , lowerCAmelCase : Tuple = mkstemp() lowerCAmelCase : int = False with fdopen(a_ , "w" ) as new_file: with open(a_ ) as old_file: for line in old_file: new_file.write(a_ ) if line_to_copy_below in line: lowerCAmelCase : Optional[int] = True for line_to_copy in lines_to_copy: new_file.write(a_ ) if not line_found: raise ValueError(F'''Line {line_to_copy_below} was not found in file.''' ) # Copy the file permissions from the old file to the new file copymode(a_ , a_ ) # Remove original file remove(a_ ) # Move new file move(a_ , a_ ) def skip_units(a_ ): return ( ("generating PyTorch" in line and not output_pytorch) or ("generating TensorFlow" in line and not output_tensorflow) or ("generating Flax" in line and not output_flax) ) def replace_in_files(a_ ): with open(a_ ) as datafile: lowerCAmelCase : Dict = [] lowerCAmelCase : Optional[int] = False lowerCAmelCase : List[Any] = False for line in datafile: if "# To replace in: " in line and "##" not in line: lowerCAmelCase : Optional[int] = line.split("\"" )[1] lowerCAmelCase : Tuple = skip_units(a_ ) elif "# Below: " in line and "##" not in line: lowerCAmelCase : Any = line.split("\"" )[1] lowerCAmelCase 
: List[str] = skip_units(a_ ) elif "# End." in line and "##" not in line: if not skip_file and not skip_snippet: replace(a_ , a_ , a_ ) lowerCAmelCase : List[str] = [] elif "# Replace with" in line and "##" not in line: lowerCAmelCase : Any = [] elif "##" not in line: lines_to_copy.append(a_ ) remove(a_ ) replace_in_files(F'''{directory}/to_replace_{lowercase_model_name}.py''' ) os.rmdir(a_ )
525
0
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
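A hedged invocation sketch; the flag names come from the shared benchmark-argument dataclass and may differ across library versions:

# python run_benchmark_tf.py --models bert-base-cased --batch_sizes 8 --sequence_lengths 128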
656
'''simple docstring''' import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): __lowerCamelCase : Optional[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''') if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right __lowerCamelCase : str = 128022 __lowerCamelCase : List[Any] = 128028 @require_sentencepiece class A_ (a_ , unittest.TestCase ): """simple docstring""" a__ = MaMaaaTokenizer a__ = False a__ = False a__ = True def _A ( self :Union[str, Any] ) -> List[str]: '''simple docstring''' super().setUp() snake_case_ : int = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"] snake_case_ : Any = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) ) snake_case_ : Optional[int] = Path(self.tmpdirname ) save_json(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES["spm_file"] ) snake_case_ : Union[str, Any] = MaMaaaTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def _A ( self :List[Any] , **lowerCAmelCase__ :List[Any] ) -> str: '''simple docstring''' return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def _A ( self :Optional[int] , lowerCAmelCase__ :Any ) -> Optional[int]: '''simple docstring''' return ( "This is a test", "This is a test", ) def _A ( self :List[str] ) -> Union[str, Any]: '''simple docstring''' snake_case_ : str = "</s>" snake_case_ : Union[str, Any] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ ) def _A ( self :Union[str, Any] ) -> List[str]: '''simple docstring''' snake_case_ : Union[str, Any] = self.get_tokenizer() snake_case_ : Any = list(tokenizer.get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "</s>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "<s>" ) self.assertEqual(len(lowerCAmelCase__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) ) @unittest.skip("Skip this test while all models are still to be uploaded." 
) def _A ( self :List[Any] ) -> Union[str, Any]: '''simple docstring''' pass def _A ( self :Optional[int] ) -> int: '''simple docstring''' snake_case_ : int = self.get_tokenizer() snake_case_ : List[str] = tokenizer.tokenize("This is a test" ) self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [2, 3, 4, 5, 6] , ) snake_case_ : Any = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] ) self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) snake_case_ : Any = tokenizer.convert_tokens_to_string(lowerCAmelCase__ ) self.assertEqual(lowerCAmelCase__ , "This is a test" ) @slow def _A ( self :Any ) -> List[Any]: '''simple docstring''' snake_case_ : int = {"input_ids": [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase__ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , ) @require_torch 
@require_sentencepiece @require_tokenizers class A_ (unittest.TestCase ): """simple docstring""" a__ = '''facebook/m2m100_418M''' a__ = [ '''In my opinion, there are two levels of response from the French government.''', '''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''', ] a__ = [ '''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''', '''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''', ] # fmt: off a__ = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2] @classmethod def _A ( cls :str ) -> int: '''simple docstring''' snake_case_ : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang="en" , tgt_lang="fr" ) snake_case_ : List[str] = 1 return cls def _A ( self :Tuple ) -> Union[str, Any]: '''simple docstring''' self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 128_006 ) self.assertEqual(self.tokenizer.get_lang_id("en" ) , 128_022 ) self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 128_076 ) self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 128_063 ) def _A ( self :Optional[int] ) -> List[str]: '''simple docstring''' snake_case_ : Dict = self.tokenizer.get_vocab() self.assertEqual(len(lowerCAmelCase__ ) , self.tokenizer.vocab_size ) self.assertEqual(vocab["<unk>"] , 3 ) self.assertIn(self.tokenizer.get_lang_token("en" ) , lowerCAmelCase__ ) def _A ( self :Any ) -> Dict: '''simple docstring''' snake_case_ : List[str] = "en" snake_case_ : Dict = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ ) def _A ( self :Union[str, Any] ) -> Dict: '''simple docstring''' self.assertIn(lowerCAmelCase__ , self.tokenizer.all_special_ids ) # fmt: off snake_case_ : Dict = [FR_CODE, 5_364, 82, 8_642, 4, 294, 47, 8, 14_028, 136, 3_286, 9_706, 6, 90_797, 6, 144_012, 162, 88_128, 30_061, 5, 2] # fmt: on snake_case_ : List[str] = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) snake_case_ : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__ ) self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__ ) def _A ( self :Tuple ) -> Tuple: '''simple docstring''' snake_case_ : Union[str, Any] = tempfile.mkdtemp() snake_case_ : int = self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(lowerCAmelCase__ ) snake_case_ : List[str] = MaMaaaTokenizer.from_pretrained(lowerCAmelCase__ ) self.assertDictEqual(new_tok.lang_token_to_id , lowerCAmelCase__ ) @require_torch def _A ( self :Optional[Any] ) -> str: '''simple docstring''' snake_case_ : Union[str, Any] = "en" snake_case_ : Tuple = "fr" snake_case_ : Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors="pt" ) snake_case_ : Dict = shift_tokens_right( batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id ) for k in batch: snake_case_ : str = batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def _A ( self :Optional[Any] ) -> Tuple: '''simple docstring''' snake_case_ : 
List[str] = "mr" self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) snake_case_ : int = "zh" self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) @require_torch def _A ( self :str ) -> int: '''simple docstring''' snake_case_ : Dict = "mr" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) snake_case_ : Tuple = "zh" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) @require_torch def _A ( self :Optional[Any] ) -> Optional[int]: '''simple docstring''' snake_case_ : Optional[int] = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , { # en_XX, A, test, EOS "input_ids": [[128_022, 58, 4_183, 2]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 128_006, } , )
656
1
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_camembert import CamembertTokenizer else: A_ : Union[str, Any] = None A_ : Optional[int] = logging.get_logger(__name__) A_ : List[str] = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} A_ : Tuple = { 'vocab_file': { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model', }, 'tokenizer_file': { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json', }, } A_ : Dict = { 'camembert-base': 512, } A_ : Optional[Any] = '▁' class A_ ( UpperCAmelCase_ ): '''simple docstring''' a__ = VOCAB_FILES_NAMES a__ = PRETRAINED_VOCAB_FILES_MAP a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = ["input_ids", "attention_mask"] a__ = CamembertTokenizer def __init__(self , lowercase__=None , lowercase__=None , lowercase__="<s>" , lowercase__="</s>" , lowercase__="</s>" , lowercase__="<s>" , lowercase__="<unk>" , lowercase__="<pad>" , lowercase__="<mask>" , lowercase__=["<s>NOTUSED", "</s>NOTUSED"] , **lowercase__ , ) -> Tuple: __UpperCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token super().__init__( __a , tokenizer_file=__a , bos_token=__a , eos_token=__a , sep_token=__a , cls_token=__a , unk_token=__a , pad_token=__a , mask_token=__a , additional_special_tokens=__a , **__a , ) __UpperCAmelCase = vocab_file __UpperCAmelCase = False if not self.vocab_file else True def lowerCAmelCase_ (self , lowercase__ , lowercase__ = None ) -> int: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __UpperCAmelCase = [self.cls_token_id] __UpperCAmelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase_ (self , lowercase__ , lowercase__ = None ) -> List[str]: __UpperCAmelCase = [self.sep_token_id] __UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCAmelCase_ (self , lowercase__ , lowercase__ = None ) -> Dict: if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(__a ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return __UpperCAmelCase = os.path.join( __a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ): copyfile(self.vocab_file , __a ) return (out_vocab_file,)
303
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number with n digits (Project Euler #25)."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
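Hedged sanity checks: F(12) = 144 is the first three-digit Fibonacci number, and 4782 is the published Project Euler #25 answer for 1000 digits:

assert fibonacci(12) == 144
assert solution(3) == 12
# assert solution(1000) == 4782  # correct, but slow: fibonacci() rebuilds its list on every call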
623
0
"""simple docstring""" import os def UpperCamelCase ( _lowerCAmelCase : str = "input.txt" ): with open(os.path.join(os.path.dirname(_lowerCAmelCase ) , _lowerCAmelCase ) ) as input_file: __a = [ [int(_lowerCAmelCase ) for element in line.split(""",""" )] for line in input_file.readlines() ] __a = len(_lowerCAmelCase ) __a = len(matrix[0] ) __a = [[-1 for _ in range(_lowerCAmelCase )] for _ in range(_lowerCAmelCase )] for i in range(_lowerCAmelCase ): __a = matrix[i][0] for j in range(1 , _lowerCAmelCase ): for i in range(_lowerCAmelCase ): __a = minimal_path_sums[i][j - 1] + matrix[i][j] for i in range(1 , _lowerCAmelCase ): __a = min( minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] ) for i in range(rows - 2 , -1 , -1 ): __a = min( minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] ) return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums ) if __name__ == "__main__": print(f'{solution() = }')
173
"""simple docstring""" __A = 6_55_21 def UpperCamelCase ( _lowerCAmelCase : str ): __a = 1 __a = 0 for plain_chr in plain_text: __a = (a + ord(_lowerCAmelCase )) % MOD_ADLER __a = (b + a) % MOD_ADLER return (b << 16) | a
173
1
from PIL import Image


def mean_threshold(image: Image) -> Image:
    """
    Binarize a grayscale PIL image in place: pixels above the global mean
    become 255, all others become 0.
    """
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
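A hedged usage sketch on a synthetic 4x4 grayscale ramp, so no image file is needed; the global mean is 120 here, and every pixel lands on 0 or 255:

img = Image.new("L", (4, 4))
img.putdata(list(range(0, 256, 16)))  # 16 values: 0, 16, ..., 240
out = mean_threshold(img)
assert set(out.getdata()) == {0, 255}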
432
from __future__ import annotations

from math import pi


def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """
    Solve for the zeroed quantity among inductive reactance, frequency and
    inductance; exactly one of the three arguments must be 0.
    """
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
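A hedged usage check using X_L = 2*pi*f*L with L = 35 mH and f = 1 kHz (70*pi, roughly 219.9115 ohms):

from math import isclose

assert isclose(ind_reactance(35e-3, 1e3, 0)["reactance"], 70 * pi)
assert isclose(ind_reactance(0, 1e3, 70 * pi)["inductance"], 35e-3)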
432
1
def catalan(number: int) -> int:
    """
    Return the nth Catalan number (1-indexed): 1, 1, 2, 5, 14, 42, ...
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
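Hedged sanity check via the recurrence C_n = C_(n-1) * (4n - 2) / (n + 1), with 1-indexed output 1, 1, 2, 5, 14, 42, ...:

assert [catalan(n) for n in range(1, 7)] == [1, 1, 2, 5, 14, 42]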
707
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey


class NearestNeighbour:
    """
    Simplest interpolation: each destination pixel copies the nearest
    source pixel found by scaling the coordinates.
    """

    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
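A hedged usage sketch on a synthetic 4x4 RGB ramp, avoiding the disk read in the __main__ block (cv2 is still required for the module import):

img = np.tile(np.arange(4, dtype=np.uint8).reshape(1, 4, 1), (4, 1, 3))  # 4x4x3 ramp
nn = NearestNeighbour(img, 8, 8)
nn.process()
assert nn.output.shape == (8, 8, 3)
assert nn.output[0, 0].tolist() == [0, 0, 0] and nn.output[0, 7].tolist() == [3, 3, 3]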
169
0
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowercase__ ( UpperCamelCase_, unittest.TestCase ): _UpperCAmelCase :int = DiTPipeline _UpperCAmelCase :int = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS _UpperCAmelCase :str = PipelineTesterMixin.required_optional_params - { "latents", "num_images_per_prompt", "callback", "callback_steps", } _UpperCAmelCase :Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS _UpperCAmelCase :Any = False def UpperCAmelCase__ ( self : Dict ): torch.manual_seed(0 ) lowerCamelCase_ : Optional[Any] =TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=A_ , activation_fn="gelu-approximate" , num_embeds_ada_norm=1000 , norm_type="ada_norm_zero" , norm_elementwise_affine=A_ , ) lowerCamelCase_ : int =AutoencoderKL() lowerCamelCase_ : Any =DDIMScheduler() lowerCamelCase_ : Optional[Any] ={'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler} return components def UpperCAmelCase__ ( self : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Dict=0 ): if str(A_ ).startswith("mps" ): lowerCamelCase_ : Optional[Any] =torch.manual_seed(A_ ) else: lowerCamelCase_ : Tuple =torch.Generator(device=A_ ).manual_seed(A_ ) lowerCamelCase_ : List[str] ={ '''class_labels''': [1], '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def UpperCAmelCase__ ( self : List[Any] ): lowerCamelCase_ : Optional[Any] ='''cpu''' lowerCamelCase_ : Union[str, Any] =self.get_dummy_components() lowerCamelCase_ : List[str] =self.pipeline_class(**A_ ) pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) lowerCamelCase_ : List[str] =self.get_dummy_inputs(A_ ) lowerCamelCase_ : str =pipe(**A_ ).images lowerCamelCase_ : str =image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) lowerCamelCase_ : Optional[Any] =np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] ) lowerCamelCase_ : Tuple =np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A_ , 1E-3 ) def UpperCAmelCase__ ( self : Optional[Any] ): self._test_inference_batch_single_identical(relax_max_difference=A_ , expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def UpperCAmelCase__ ( self : Optional[Any] ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class lowercase__ ( unittest.TestCase ): def UpperCAmelCase__ ( self : Any ): super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : Any ): lowerCamelCase_ : Any =torch.manual_seed(0 ) lowerCamelCase_ : List[str] =DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" ) pipe.to("cuda" ) lowerCamelCase_ : Dict =['''vase''', '''umbrella''', '''white shark''', '''white 
wolf'''] lowerCamelCase_ : Tuple =pipe.get_label_ids(A_ ) lowerCamelCase_ : Any =pipe(A_ , generator=A_ , num_inference_steps=40 , output_type="np" ).images for word, image in zip(A_ , A_ ): lowerCamelCase_ : int =load_numpy( F"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" ) assert np.abs((expected_image - image).max() ) < 1E-2 def UpperCAmelCase__ ( self : Dict ): lowerCamelCase_ : str =DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" ) lowerCamelCase_ : Optional[Any] =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to("cuda" ) lowerCamelCase_ : Union[str, Any] =['''vase''', '''umbrella'''] lowerCamelCase_ : List[str] =pipe.get_label_ids(A_ ) lowerCamelCase_ : int =torch.manual_seed(0 ) lowerCamelCase_ : List[Any] =pipe(A_ , generator=A_ , num_inference_steps=25 , output_type="np" ).images for word, image in zip(A_ , A_ ): lowerCamelCase_ : int =load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" F"""/dit/{word}_512.npy""" ) assert np.abs((expected_image - image).max() ) < 1E-1
153
from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar A__ : Tuple = TypeVar('''KEY''') A__ : List[Any] = TypeVar('''VAL''') @dataclass(frozen=UpperCamelCase_ ,slots=UpperCamelCase_ ) class __snake_case ( Generic[KEY, VAL] ): _a = 42 _a = 42 class __snake_case ( _Item ): def __init__( self : Union[str, Any]): super().__init__(A_ , A_) def __bool__( self : int): return False A__ : Optional[int] = _DeletedItem() class __snake_case ( MutableMapping[KEY, VAL] ): def __init__( self : Optional[Any] , A_ : int = 8 , A_ : float = 0.75): lowerCAmelCase_ : Optional[Any] = initial_block_size lowerCAmelCase_ : list[_Item | None] = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 lowerCAmelCase_ : str = capacity_factor lowerCAmelCase_ : Union[str, Any] = 0 def UpperCAmelCase__ ( self : str , A_ : KEY): return hash(A_) % len(self._buckets) def UpperCAmelCase__ ( self : Union[str, Any] , A_ : int): return (ind + 1) % len(self._buckets) def UpperCAmelCase__ ( self : Tuple , A_ : int , A_ : KEY , A_ : VAL): lowerCAmelCase_ : str = self._buckets[ind] if not stored: lowerCAmelCase_ : Optional[int] = _Item(A_ , A_) self._len += 1 return True elif stored.key == key: lowerCAmelCase_ : Union[str, Any] = _Item(A_ , A_) return True else: return False def UpperCAmelCase__ ( self : Tuple): lowerCAmelCase_ : List[Any] = len(self._buckets) * self._capacity_factor return len(self) >= int(A_) def UpperCAmelCase__ ( self : List[Any]): if len(self._buckets) <= self._initial_block_size: return False lowerCAmelCase_ : Any = len(self._buckets) * self._capacity_factor / 2 return len(self) < limit def UpperCAmelCase__ ( self : Optional[Any] , A_ : int): lowerCAmelCase_ : List[str] = self._buckets lowerCAmelCase_ : str = [None] * new_size lowerCAmelCase_ : Optional[Any] = 0 for item in old_buckets: if item: self._add_item(item.key , item.val) def UpperCAmelCase__ ( self : int): self._resize(len(self._buckets) * 2) def UpperCAmelCase__ ( self : Dict): self._resize(len(self._buckets) // 2) def UpperCAmelCase__ ( self : List[str] , A_ : KEY): lowerCAmelCase_ : str = self._get_bucket_index(A_) for _ in range(len(self._buckets)): yield ind lowerCAmelCase_ : Tuple = self._get_next_ind(A_) def UpperCAmelCase__ ( self : List[Any] , A_ : KEY , A_ : VAL): for ind in self._iterate_buckets(A_): if self._try_set(A_ , A_ , A_): break def __setitem__( self : int , A_ : KEY , A_ : VAL): if self._is_full(): self._size_up() self._add_item(A_ , A_) def __delitem__( self : Tuple , A_ : KEY): for ind in self._iterate_buckets(A_): lowerCAmelCase_ : Tuple = self._buckets[ind] if item is None: raise KeyError(A_) if item is _deleted: continue if item.key == key: lowerCAmelCase_ : Dict = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self : List[str] , A_ : KEY): for ind in self._iterate_buckets(A_): lowerCAmelCase_ : Tuple = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(A_) def __len__( self : Dict): return self._len def __iter__( self : Dict): yield from (item.key for item in self._buckets if item) def __repr__( self : Optional[Any]): lowerCAmelCase_ : List[Any] = ''' ,'''.join( F"""{item.key}: {item.val}""" for item in self._buckets if item) return F"""HashMap({val_string})"""
171
0
"""simple docstring""" import os from typing import Dict, List, Union import tensorflow as tf from keras_nlp.tokenizers import BytePairTokenizer from tensorflow_text import pad_model_inputs from .tokenization_gpta import GPTaTokenizer class lowerCAmelCase ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None ) -> List[Any]: '''simple docstring''' super().__init__() lowerCamelCase_ = pad_token_id lowerCamelCase_ = max_length lowerCamelCase_ = vocab lowerCamelCase_ = merges lowerCamelCase_ = BytePairTokenizer(UpperCamelCase__ , UpperCamelCase__ , sequence_length=UpperCamelCase__ ) @classmethod def _lowerCAmelCase ( cls , UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]: '''simple docstring''' lowerCamelCase_ = [''' '''.join(UpperCamelCase__ ) for m in tokenizer.bpe_ranks.keys()] lowerCamelCase_ = tokenizer.get_vocab() return cls(UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ ) @classmethod def _lowerCAmelCase ( cls , UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ ) -> str: '''simple docstring''' lowerCamelCase_ = GPTaTokenizer.from_pretrained(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ ) return cls.from_tokenizer(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ ) @classmethod def _lowerCAmelCase ( cls , UpperCamelCase__ ) -> List[Any]: '''simple docstring''' return cls(**UpperCamelCase__ ) def _lowerCAmelCase ( self ) -> int: '''simple docstring''' return { "vocab": self.vocab, "merges": self.merges, "max_length": self.max_length, "pad_token_id": self.pad_token_id, } def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Any: '''simple docstring''' lowerCamelCase_ = self.tf_tokenizer(UpperCamelCase__ ) lowerCamelCase_ = tf.ones_like(UpperCamelCase__ ) if self.pad_token_id is not None: # pad the tokens up to max length lowerCamelCase_ = max_length if max_length is not None else self.max_length if max_length is not None: lowerCamelCase_ , lowerCamelCase_ = pad_model_inputs( UpperCamelCase__ , max_seq_length=UpperCamelCase__ , pad_value=self.pad_token_id ) return {"attention_mask": attention_mask, "input_ids": input_ids}
718
"""simple docstring""" import torch from diffusers import DiffusionPipeline class lowerCAmelCase ( a ): """simple docstring""" def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> Dict: '''simple docstring''' super().__init__() self.register_modules(unet=UpperCamelCase__ , scheduler=UpperCamelCase__ ) def __call__( self ) -> Dict: '''simple docstring''' lowerCamelCase_ = torch.randn( (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , ) lowerCamelCase_ = 1 lowerCamelCase_ = self.unet(UpperCamelCase__ , UpperCamelCase__ ).sample lowerCamelCase_ = self.scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample lowerCamelCase_ = scheduler_output - scheduler_output + torch.ones_like(UpperCamelCase__ ) return result
66
0
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Count how many ways each total can be rolled with `dice_number` dice."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """
    Project Euler #205: probability that Peter (nine 4-sided dice) beats
    Colin (six 6-sided dice), rounded to seven decimal places.
    """
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    return round(peter_win_probability, ndigits=7)


if __name__ == "__main__":
    print(f"{solution() = }")
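Hedged sanity checks: each frequency table should enumerate every outcome, and 0.5731441 is the published Project Euler #205 answer:

assert sum(total_frequency_distribution(sides_number=4, dice_number=9)) == 4**9
assert sum(total_frequency_distribution(sides_number=6, dice_number=6)) == 6**6
assert solution() == 0.5731441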
319
import inspect import unittest from transformers import DPTConfig from transformers.file_utils import is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class __SCREAMING_SNAKE_CASE : def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=[0, 1, 2, 3] , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=37 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=[1, 384, 24, 24] , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=None , ): lowercase : Optional[Any] = parent lowercase : Tuple = batch_size lowercase : Any = image_size lowercase : Union[str, Any] = patch_size lowercase : Optional[Any] = num_channels lowercase : Optional[int] = is_training lowercase : Dict = use_labels lowercase : Union[str, Any] = hidden_size lowercase : Dict = num_hidden_layers lowercase : Optional[Any] = backbone_out_indices lowercase : Dict = num_attention_heads lowercase : Tuple = intermediate_size lowercase : Union[str, Any] = hidden_act lowercase : str = hidden_dropout_prob lowercase : Optional[Any] = attention_probs_dropout_prob lowercase : Union[str, Any] = initializer_range lowercase : str = num_labels lowercase : str = backbone_featmap_shape lowercase : Optional[int] = scope lowercase : int = is_hybrid # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token) lowercase : List[Any] = (image_size // patch_size) ** 2 lowercase : int = num_patches + 1 def __lowerCamelCase ( self ): lowercase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase : int = None if self.use_labels: lowercase : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) lowercase : Union[str, Any] = self.get_config() return config, pixel_values, labels def __lowerCamelCase ( self ): lowercase : List[str] = { '''global_padding''': '''same''', '''layer_type''': '''bottleneck''', '''depths''': [3, 4, 9], '''out_features''': ['''stage1''', '''stage2''', '''stage3'''], '''embedding_dynamic_padding''': True, '''hidden_sizes''': [96, 192, 384, 768], '''num_groups''': 2, } return DPTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE__ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=SCREAMING_SNAKE_CASE__ , backbone_featmap_shape=self.backbone_featmap_shape , ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : List[str] = DPTModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() lowercase : Tuple = model(SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : Union[str, Any] = self.num_labels lowercase : Optional[Any] = DPTForDepthEstimation(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() lowercase : Dict = model(SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : Optional[Any] = self.num_labels lowercase : List[str] = DPTForSemanticSegmentation(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() lowercase : Optional[int] = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def __lowerCamelCase ( self ): lowercase : List[Any] = self.prepare_config_and_inputs() lowercase , lowercase , lowercase : Union[str, Any] = config_and_inputs lowercase : List[Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ): A : Tuple = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else () A : Optional[int] = ( { 'depth-estimation': DPTForDepthEstimation, 'feature-extraction': DPTModel, 'image-segmentation': DPTForSemanticSegmentation, } if is_torch_available() else {} ) A : str = False A : Union[str, Any] = False A : List[Any] = False def __lowerCamelCase ( self ): lowercase : List[str] = DPTModelTester(self ) lowercase : Tuple = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ , hidden_size=37 ) def __lowerCamelCase ( self ): self.config_tester.run_common_tests() @unittest.skip(reason='''DPT does not use inputs_embeds''' ) def __lowerCamelCase ( self ): pass def __lowerCamelCase ( self ): lowercase , lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase : str = model_class(SCREAMING_SNAKE_CASE__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE__ , nn.Linear ) ) def __lowerCamelCase ( self ): lowercase , lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase : Dict = model_class(SCREAMING_SNAKE_CASE__ ) lowercase : Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase : List[str] = [*signature.parameters.keys()] lowercase : str = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ ) def 
__lowerCamelCase ( self ): lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): lowercase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue lowercase , lowercase : int = self.model_tester.prepare_config_and_inputs_for_common() lowercase : Tuple = True if model_class in get_values(SCREAMING_SNAKE_CASE__ ): continue lowercase : List[str] = model_class(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.train() lowercase : str = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ ) lowercase : Optional[Any] = model(**SCREAMING_SNAKE_CASE__ ).loss loss.backward() def __lowerCamelCase ( self ): for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue lowercase , lowercase : str = self.model_tester.prepare_config_and_inputs_for_common() lowercase : Dict = False lowercase : Optional[int] = True if model_class in get_values(SCREAMING_SNAKE_CASE__ ) or not model_class.supports_gradient_checkpointing: continue lowercase : Any = model_class(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.gradient_checkpointing_enable() model.train() lowercase : Tuple = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ ) lowercase : Dict = model(**SCREAMING_SNAKE_CASE__ ).loss loss.backward() def __lowerCamelCase ( self ): lowercase , lowercase : int = self.model_tester.prepare_config_and_inputs_for_common() lowercase : Optional[Any] = _config_zero_init(SCREAMING_SNAKE_CASE__ ) for model_class in self.all_model_classes: lowercase : List[Any] = model_class(config=SCREAMING_SNAKE_CASE__ ) # Skip the check for the backbone lowercase : Tuple = [] for name, module in model.named_modules(): if module.__class__.__name__ == "DPTViTHybridEmbeddings": lowercase : List[Any] = [f"""{name}.{key}""" for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __lowerCamelCase ( self ): pass @slow def __lowerCamelCase ( self ): for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]: lowercase : Any = DPTModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type lowercase , lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common() lowercase : Any = '''add''' with self.assertRaises(SCREAMING_SNAKE_CASE__ ): lowercase : Optional[int] = DPTForDepthEstimation(SCREAMING_SNAKE_CASE__ ) def __lowercase ( ) ->List[Any]: """simple docstring""" lowercase : List[str] = 
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision @slow class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCamelCase ( self ): lowercase : Optional[Any] = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' ) lowercase : Optional[Any] = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(SCREAMING_SNAKE_CASE__ ) lowercase : Any = prepare_img() lowercase : str = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE__ ) # forward pass with torch.no_grad(): lowercase : Any = model(**SCREAMING_SNAKE_CASE__ ) lowercase : Optional[Any] = outputs.predicted_depth # verify the predicted depth lowercase : str = torch.Size((1, 384, 384) ) self.assertEqual(predicted_depth.shape , SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = torch.tensor( [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(SCREAMING_SNAKE_CASE__ ) self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
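# The slice check above compares a 3x3x3 corner of the predicted depth map,
# scaled by 1/100, against hard-coded reference values with an absolute
# tolerance of 1e-4, so small numerical drift across hardware still passes.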
"""simple docstring""" from math import pi, sqrt def lowercase ( a__ : float ) -> float: if num <= 0: raise ValueError('''math domain error''' ) if num > 171.5: raise OverflowError('''math range error''' ) elif num - int(a__ ) not in (0, 0.5): raise NotImplementedError('''num must be an integer or a half-integer''' ) elif num == 0.5: return sqrt(a__ ) else: return 1.0 if num == 1 else (num - 1) * gamma(num - 1 ) def lowercase ( ) -> None: assert gamma(0.5 ) == sqrt(a__ ) assert gamma(1 ) == 1.0 assert gamma(2 ) == 1.0 if __name__ == "__main__": from doctest import testmod testmod() UpperCAmelCase = 1.0 while num: UpperCAmelCase = float(input("""Gamma of: """)) print(F'''gamma({num}) = {gamma(num)}''') print("""\nEnter 0 to exit...""")
"""simple docstring""" import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class UpperCAmelCase_ ( unittest.TestCase): def _UpperCamelCase ( self : List[Any] ) -> List[str]: _UpperCamelCase = 0 def _UpperCamelCase ( self : Any ) -> str: _UpperCamelCase = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' ) self.assertIsInstance(__UpperCamelCase , __UpperCamelCase ) def _UpperCamelCase ( self : Any ) -> Tuple: with tempfile.TemporaryDirectory() as tmpdirname: _UpperCamelCase = Path(__UpperCamelCase ) / '''preprocessor_config.json''' _UpperCamelCase = Path(__UpperCamelCase ) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__UpperCamelCase , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(__UpperCamelCase , '''w''' ) ) _UpperCamelCase = AutoImageProcessor.from_pretrained(__UpperCamelCase ) self.assertIsInstance(__UpperCamelCase , __UpperCamelCase ) def _UpperCamelCase ( self : Optional[Any] ) -> List[str]: # Ensure we can load the image processor from the feature extractor config with tempfile.TemporaryDirectory() as tmpdirname: _UpperCamelCase = Path(__UpperCamelCase ) / '''preprocessor_config.json''' _UpperCamelCase = Path(__UpperCamelCase ) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__UpperCamelCase , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(__UpperCamelCase , '''w''' ) ) _UpperCamelCase = AutoImageProcessor.from_pretrained(__UpperCamelCase ) self.assertIsInstance(__UpperCamelCase , __UpperCamelCase ) def _UpperCamelCase ( self : int ) -> Optional[int]: with tempfile.TemporaryDirectory() as tmpdirname: _UpperCamelCase = CLIPConfig() # Create a dummy config file with image_proceesor_type _UpperCamelCase = Path(__UpperCamelCase ) / '''preprocessor_config.json''' _UpperCamelCase = Path(__UpperCamelCase ) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__UpperCamelCase , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(__UpperCamelCase , '''w''' ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally _UpperCamelCase = AutoImageProcessor.from_pretrained(__UpperCamelCase ).to_dict() config_dict.pop('''image_processor_type''' ) _UpperCamelCase = CLIPImageProcessor(**__UpperCamelCase ) # save in new folder model_config.save_pretrained(__UpperCamelCase ) config.save_pretrained(__UpperCamelCase ) _UpperCamelCase = AutoImageProcessor.from_pretrained(__UpperCamelCase ) # make sure private variable is not incorrectly saved _UpperCamelCase = json.loads(config.to_json_string() ) self.assertTrue('''_processor_class''' not in dict_as_saved ) self.assertIsInstance(__UpperCamelCase , __UpperCamelCase ) def _UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]: with tempfile.TemporaryDirectory() as tmpdirname: _UpperCamelCase = Path(__UpperCamelCase 
) / '''preprocessor_config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__UpperCamelCase , '''w''' ) , ) _UpperCamelCase = AutoImageProcessor.from_pretrained(__UpperCamelCase ) self.assertIsInstance(__UpperCamelCase , __UpperCamelCase ) def _UpperCamelCase ( self : List[Any] ) -> List[Any]: with self.assertRaisesRegex( __UpperCamelCase , '''clip-base is not a local folder and is not a valid model identifier''' ): _UpperCamelCase = AutoImageProcessor.from_pretrained('''clip-base''' ) def _UpperCamelCase ( self : Dict ) -> Union[str, Any]: with self.assertRaisesRegex( __UpperCamelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): _UpperCamelCase = AutoImageProcessor.from_pretrained(__UpperCamelCase , revision='''aaaaaa''' ) def _UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]: with self.assertRaisesRegex( __UpperCamelCase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ): _UpperCamelCase = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' ) def _UpperCamelCase ( self : int ) -> Any: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__UpperCamelCase ): _UpperCamelCase = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(__UpperCamelCase ): _UpperCamelCase = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__UpperCamelCase ) _UpperCamelCase = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__UpperCamelCase ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) # Test image processor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__UpperCamelCase ) _UpperCamelCase = AutoImageProcessor.from_pretrained(__UpperCamelCase , trust_remote_code=__UpperCamelCase ) self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' ) def _UpperCamelCase ( self : Optional[int] ) -> List[Any]: try: AutoConfig.register('''custom''' , __UpperCamelCase ) AutoImageProcessor.register(__UpperCamelCase , __UpperCamelCase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__UpperCamelCase ): AutoImageProcessor.register(__UpperCamelCase , __UpperCamelCase ) with tempfile.TemporaryDirectory() as tmpdirname: _UpperCamelCase = Path(__UpperCamelCase ) / '''preprocessor_config.json''' _UpperCamelCase = Path(__UpperCamelCase ) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__UpperCamelCase , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(__UpperCamelCase , '''w''' ) ) _UpperCamelCase = CustomImageProcessor.from_pretrained(__UpperCamelCase ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__UpperCamelCase ) _UpperCamelCase = AutoImageProcessor.from_pretrained(__UpperCamelCase ) self.assertIsInstance(__UpperCamelCase , __UpperCamelCase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def _UpperCamelCase ( self : List[str] ) -> Optional[Any]: class UpperCAmelCase_ ( _lowercase): snake_case__ = True try: AutoConfig.register('''custom''' , __UpperCamelCase ) AutoImageProcessor.register(__UpperCamelCase , __UpperCamelCase ) # If remote code is not set, the default is to use local _UpperCamelCase = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. _UpperCamelCase = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__UpperCamelCase ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub _UpperCamelCase = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__UpperCamelCase ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(not hasattr(__UpperCamelCase , '''is_local''' ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    """Sort array[start:end] in place with insertion sort."""
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # max heap
    largest = index
    left_index = 2 * index + 1  # left child
    right_index = 2 * index + 2  # right child
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)
    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int):
    """Return the median of the three candidate pivot values."""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
import functools


def edit_distance(word1: str, word2: str) -> int:
    """Levenshtein distance: the minimum number of single-character
    insertions, deletions, and replacements turning word1 into word2."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if the first word is exhausted, insert the rest of the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if the second word is exhausted, delete the rest of the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # 0 when current letters match
        return min(
            1 + min_distance(index1 + 1, index2),  # delete from word1
            1 + min_distance(index1, index2 + 1),  # insert into word1
            diff + min_distance(index1 + 1, index2 + 1),  # replace or keep
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
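if __name__ == "__main__":
    # Illustrative sanity checks (added): classic Levenshtein pairs.
    assert edit_distance("kitten", "sitting") == 3
    assert edit_distance("intention", "execution") == 5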
from manim import * class lowerCAmelCase ( __UpperCamelCase ): def A_ ( self : str ) -> List[Any]: lowerCamelCase__ : Optional[int] = Rectangle(height=0.5 , width=0.5 ) lowerCamelCase__ : int = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 ) lowerCamelCase__ : Optional[int] = Rectangle(height=0.2_5 , width=0.2_5 ) lowerCamelCase__ : Union[str, Any] = [mem.copy() for i in range(6 )] lowerCamelCase__ : str = [mem.copy() for i in range(6 )] lowerCamelCase__ : List[str] = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 ) lowerCamelCase__ : Tuple = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 ) lowerCamelCase__ : Dict = VGroup(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 ) lowerCamelCase__ : int = Text('CPU' , font_size=24 ) lowerCamelCase__ : str = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(UpperCAmelCase ) lowerCamelCase__ : str = [mem.copy() for i in range(4 )] lowerCamelCase__ : Union[str, Any] = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 ) lowerCamelCase__ : List[Any] = Text('GPU' , font_size=24 ) lowerCamelCase__ : Optional[int] = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase ) gpu.move_to([-1, -1, 0] ) self.add(UpperCAmelCase ) lowerCamelCase__ : int = [mem.copy() for i in range(6 )] lowerCamelCase__ : Union[str, Any] = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 ) lowerCamelCase__ : int = Text('Model' , font_size=24 ) lowerCamelCase__ : List[str] = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase ) model.move_to([3, -1.0, 0] ) self.add(UpperCAmelCase ) lowerCamelCase__ : Union[str, Any] = [] lowerCamelCase__ : Optional[Any] = [] for i, rect in enumerate(UpperCAmelCase ): lowerCamelCase__ : int = fill.copy().set_fill(UpperCAmelCase , opacity=0.8 ) target.move_to(UpperCAmelCase ) model_arr.append(UpperCAmelCase ) lowerCamelCase__ : Union[str, Any] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase , opacity=0.8 ) cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(UpperCAmelCase ) self.add(*UpperCAmelCase , *UpperCAmelCase ) lowerCamelCase__ : str = [meta_mem.copy() for i in range(6 )] lowerCamelCase__ : Dict = [meta_mem.copy() for i in range(6 )] lowerCamelCase__ : List[Any] = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 ) lowerCamelCase__ : int = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 ) lowerCamelCase__ : List[Any] = VGroup(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 ) lowerCamelCase__ : Optional[int] = Text('Disk' , font_size=24 ) lowerCamelCase__ : Optional[int] = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase ) disk.move_to([-4, -1.2_5, 0] ) self.add(UpperCAmelCase , UpperCAmelCase ) lowerCamelCase__ : Optional[int] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) lowerCamelCase__ : List[Any] = MarkupText( F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(UpperCAmelCase , UpperCAmelCase ) lowerCamelCase__ : Tuple = MarkupText( F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , ) blue_text.next_to(UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(UpperCAmelCase ) lowerCamelCase__ : str = MarkupText( F"""Now watch as an 
input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(UpperCAmelCase ) ) lowerCamelCase__ : str = Square(0.3 ) input.set_fill(UpperCAmelCase , opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] , UpperCAmelCase , buff=0.5 ) self.play(Write(UpperCAmelCase ) ) input.generate_target() input.target.next_to(model_arr[0] , direction=UpperCAmelCase , buff=0.0_2 ) self.play(MoveToTarget(UpperCAmelCase ) ) self.play(FadeOut(UpperCAmelCase ) ) lowerCamelCase__ : str = Arrow(start=UpperCAmelCase , end=UpperCAmelCase , color=UpperCAmelCase , buff=0.5 ) a.next_to(model_arr[0].get_left() , UpperCAmelCase , buff=0.2 ) model_cpu_arr[0].generate_target() model_cpu_arr[0].target.move_to(gpu_rect[0] ) lowerCamelCase__ : Optional[int] = MarkupText( F"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(UpperCAmelCase , run_time=3 ) ) lowerCamelCase__ : Tuple = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.0_2} self.play( Write(UpperCAmelCase ) , Circumscribe(model_arr[0] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase , **UpperCAmelCase ) , ) self.play(MoveToTarget(model_cpu_arr[0] ) ) lowerCamelCase__ : List[Any] = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.0_2 , UpperCAmelCase , buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.0_2 ) lowerCamelCase__ : Union[str, Any] = AnimationGroup( FadeOut(UpperCAmelCase , run_time=0.5 ) , MoveToTarget(UpperCAmelCase , run_time=0.5 ) , FadeIn(UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 ) self.play(UpperCAmelCase ) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: lowerCamelCase__ : Union[str, Any] = 0.7 self.play( Circumscribe(model_arr[i] , **UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=UpperCAmelCase , **UpperCAmelCase ) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , ) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.0_2 , buff=0.2 ) self.play( Circumscribe(model_arr[-1] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase , **UpperCAmelCase ) , ) self.play(MoveToTarget(model_cpu_arr[i] ) ) lowerCamelCase__ : Optional[int] = a_c lowerCamelCase__ : Dict = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , RIGHT + 0.0_2 , buff=0.5 ) self.play( FadeOut(UpperCAmelCase ) , FadeOut(UpperCAmelCase , run_time=0.5 ) , ) lowerCamelCase__ : Dict = MarkupText(F"""Inference on a model too large for GPU memory\nis successfully completed.""" , 
font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(UpperCAmelCase , run_time=3 ) , MoveToTarget(UpperCAmelCase ) ) self.wait()
from __future__ import annotations

from collections.abc import Generator


def sieve() -> Generator[int, None, None]:
    """Incremental sieve of Eratosthenes: lazily yield the primes in order."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # composite: slide its recorded factor to the next free multiple
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # no recorded factor, so `prime` really is prime
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    """Return the least odd index n such that 2 * n * prime(n) exceeds `limit`
    (for odd n this product is the prime square remainder of prime(n))."""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
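if __name__ == "__main__":
    # Illustrative check (added): the incremental sieve yields primes lazily.
    from itertools import islice

    assert list(islice(sieve(), 8)) == [2, 3, 5, 7, 11, 13, 17, 19]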
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
'''simple docstring''' from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class lowerCamelCase_ : def __init__( self : Optional[int] , _A : Optional[Any] , _A : Tuple=2 , _A : Tuple=3 , _A : Optional[Any]=4 , _A : List[Any]=2 , _A : List[Any]=7 , _A : int=True , _A : Dict=True , _A : int=True , _A : Dict=True , _A : Tuple=99 , _A : Union[str, Any]=36 , _A : int=2 , _A : List[str]=4 , _A : int=37 , _A : List[Any]="gelu" , _A : str=0.1 , _A : str=0.1 , _A : Tuple=512 , _A : Dict=16 , _A : Tuple=2 , _A : Union[str, Any]=0.0_2 , _A : Any=6 , _A : Union[str, Any]=6 , _A : str=3 , _A : str=4 , _A : Tuple=None , _A : int=1_000 , ): '''simple docstring''' UpperCAmelCase__ : int = parent UpperCAmelCase__ : Optional[int] = batch_size UpperCAmelCase__ : str = num_channels UpperCAmelCase__ : str = image_size UpperCAmelCase__ : List[str] = patch_size UpperCAmelCase__ : Any = is_training UpperCAmelCase__ : List[str] = use_input_mask UpperCAmelCase__ : Tuple = use_token_type_ids UpperCAmelCase__ : str = use_labels UpperCAmelCase__ : int = vocab_size UpperCAmelCase__ : List[Any] = hidden_size UpperCAmelCase__ : Optional[int] = num_hidden_layers UpperCAmelCase__ : List[str] = num_attention_heads UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : Dict = hidden_act UpperCAmelCase__ : int = hidden_dropout_prob UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob UpperCAmelCase__ : List[str] = max_position_embeddings UpperCAmelCase__ : Tuple = type_vocab_size UpperCAmelCase__ : Any = type_sequence_label_size UpperCAmelCase__ : List[str] = initializer_range UpperCAmelCase__ : List[str] = coordinate_size UpperCAmelCase__ : Tuple = shape_size UpperCAmelCase__ : Optional[int] = num_labels UpperCAmelCase__ : Optional[Any] = num_choices UpperCAmelCase__ : Union[str, Any] = scope UpperCAmelCase__ : Optional[Any] = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) UpperCAmelCase__ : str = text_seq_length UpperCAmelCase__ : Tuple = (image_size // patch_size) ** 2 + 1 UpperCAmelCase__ : Tuple = self.text_seq_length + self.image_seq_length def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) UpperCAmelCase__ : int = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if 
bbox[i, j, 3] < bbox[i, j, 1]: UpperCAmelCase__ : str = bbox[i, j, 3] UpperCAmelCase__ : Dict = bbox[i, j, 1] UpperCAmelCase__ : str = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: UpperCAmelCase__ : Optional[int] = bbox[i, j, 2] UpperCAmelCase__ : Any = bbox[i, j, 0] UpperCAmelCase__ : List[Any] = tmp_coordinate UpperCAmelCase__ : str = tf.constant(_A ) UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase__ : Any = None if self.use_input_mask: UpperCAmelCase__ : Any = random_attention_mask([self.batch_size, self.text_seq_length] ) UpperCAmelCase__ : Any = None if self.use_token_type_ids: UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) UpperCAmelCase__ : Optional[int] = None UpperCAmelCase__ : List[str] = None if self.use_labels: UpperCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) UpperCAmelCase__ : Optional[int] = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def lowercase_ ( self : Union[str, Any] , _A : int , _A : str , _A : Optional[int] , _A : Optional[int] , _A : List[str] , _A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : int = TFLayoutLMvaModel(config=_A ) # text + image UpperCAmelCase__ : Tuple = model(_A , pixel_values=_A , training=_A ) UpperCAmelCase__ : Tuple = model( _A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , training=_A , ) UpperCAmelCase__ : Optional[Any] = model(_A , bbox=_A , pixel_values=_A , training=_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only UpperCAmelCase__ : Any = model(_A , training=_A ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only UpperCAmelCase__ : str = model({'''pixel_values''': pixel_values} , training=_A ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def lowercase_ ( self : Union[str, Any] , _A : Optional[int] , _A : Optional[Any] , _A : Dict , _A : List[Any] , _A : List[Any] , _A : Any , _A : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.num_labels UpperCAmelCase__ : int = TFLayoutLMvaForSequenceClassification(config=_A ) UpperCAmelCase__ : Union[str, Any] = model( _A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase_ ( self : Dict , _A : List[Any] , _A : Any , _A : Dict , _A : str , _A : Optional[int] , _A : str , _A : str ): '''simple docstring''' UpperCAmelCase__ : 
List[Any] = self.num_labels UpperCAmelCase__ : Union[str, Any] = TFLayoutLMvaForTokenClassification(config=_A ) UpperCAmelCase__ : Optional[int] = model( _A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def lowercase_ ( self : Dict , _A : Dict , _A : List[str] , _A : Union[str, Any] , _A : int , _A : Tuple , _A : Dict , _A : str ): '''simple docstring''' UpperCAmelCase__ : str = 2 UpperCAmelCase__ : Dict = TFLayoutLMvaForQuestionAnswering(config=_A ) UpperCAmelCase__ : str = model( _A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , training=_A , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : int = self.prepare_config_and_inputs() ((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : List[str] = config_and_inputs UpperCAmelCase__ : List[Any] = { '''input_ids''': input_ids, '''bbox''': bbox, '''pixel_values''': pixel_values, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_tf class lowerCamelCase_ ( __a , __a , unittest.TestCase ): lowerCAmelCase__ = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) lowerCAmelCase__ = ( {'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel} if is_tf_available() else {} ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def lowercase_ ( self : List[Any] , _A : Union[str, Any] , _A : str , _A : List[Any] , _A : Dict , _A : List[str] ): '''simple docstring''' return True def lowercase_ ( self : Optional[Any] , _A : Tuple , _A : Any , _A : Dict=False ): '''simple docstring''' UpperCAmelCase__ : List[Any] = copy.deepcopy(_A ) if model_class in get_values(_A ): UpperCAmelCase__ : Tuple = { k: tf.tile(tf.expand_dims(_A , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(_A , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(_A ): UpperCAmelCase__ : Dict = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(_A ): UpperCAmelCase__ : Tuple = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) UpperCAmelCase__ : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(_A ): UpperCAmelCase__ : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(_A ): UpperCAmelCase__ : int = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Any = TFLayoutLMvaModelTester(self ) UpperCAmelCase__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 ) def lowercase_ ( self : str ): '''simple docstring''' self.config_tester.run_common_tests() def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Any = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Optional[Any] = model_class(_A ) if getattr(_A , '''hf_compute_loss''' , _A ): # The number of elements in the loss should be the same as the number of elements in the label UpperCAmelCase__ : Tuple = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A ) UpperCAmelCase__ : List[Any] = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=_A )[0] ] UpperCAmelCase__ : Optional[Any] = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs UpperCAmelCase__ : Any = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A ) UpperCAmelCase__ : Tuple = prepared_for_class.pop('''input_ids''' ) UpperCAmelCase__ : List[Any] = model(_A , **_A )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions UpperCAmelCase__ : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A ) UpperCAmelCase__ : Tuple = prepared_for_class.pop('''input_ids''' ) if "labels" in prepared_for_class: UpperCAmelCase__ : Optional[Any] = prepared_for_class['''labels'''].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: UpperCAmelCase__ : Any = -100 UpperCAmelCase__ : Union[str, Any] = tf.convert_to_tensor(_A ) UpperCAmelCase__ : int = model(_A , **_A )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict UpperCAmelCase__ : Optional[int] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A ) UpperCAmelCase__ : Dict = model(_A )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple UpperCAmelCase__ : Dict = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A ) # Get keys that were added with the _prepare_for_class function UpperCAmelCase__ : Optional[int] = prepared_for_class.keys() - inputs_dict.keys() UpperCAmelCase__ : int = inspect.signature(model.call ).parameters UpperCAmelCase__ : Union[str, Any] = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple UpperCAmelCase__ : Dict = {0: '''input_ids'''} for label_key in label_keys: UpperCAmelCase__ : str = signature_names.index(_A ) UpperCAmelCase__ : List[Any] = label_key UpperCAmelCase__ : Dict = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple UpperCAmelCase__ : Tuple = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: UpperCAmelCase__ : Any = prepared_for_class[value] UpperCAmelCase__ : Tuple = tuple(_A ) # Send to model UpperCAmelCase__ : Optional[Any] = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def lowercase_ ( self : int ): '''simple docstring''' ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(_A , _A , _A , _A , _A , _A 
) def lowercase_ ( self : Tuple ): '''simple docstring''' ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : int = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase__ : Union[str, Any] = type self.model_tester.create_and_check_model(_A , _A , _A , _A , _A , _A ) def lowercase_ ( self : List[str] ): '''simple docstring''' ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( _A , _A , _A , _A , _A , _A , _A ) def lowercase_ ( self : Any ): '''simple docstring''' ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( _A , _A , _A , _A , _A , _A , _A ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( _A , _A , _A , _A , _A , _A , _A ) @slow def lowercase_ ( self : List[Any] ): '''simple docstring''' for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : List[str] = TFLayoutLMvaModel.from_pretrained(_A ) self.assertIsNotNone(_A ) def a__ ( ) -> List[str]: UpperCAmelCase__ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf class lowerCamelCase_ ( unittest.TestCase ): @cached_property def lowercase_ ( self : Dict ): '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=_A ) if is_vision_available() else None @slow def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : str = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ) UpperCAmelCase__ : Dict = self.default_image_processor UpperCAmelCase__ : Any = prepare_img() UpperCAmelCase__ : int = image_processor(images=_A , return_tensors='''tf''' ).pixel_values UpperCAmelCase__ : str = tf.constant([[1, 2]] ) UpperCAmelCase__ : Optional[Any] = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass UpperCAmelCase__ : int = model(input_ids=_A , bbox=_A , pixel_values=_A , training=_A ) # verify the logits UpperCAmelCase__ : Optional[int] = (1, 199, 768) self.assertEqual(outputs.last_hidden_state.shape , _A ) UpperCAmelCase__ : Dict = tf.constant( [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _A , atol=1e-4 ) )
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler a_ : Optional[int] = 16 a_ : int = 32 def _SCREAMING_SNAKE_CASE ( snake_case_ : Accelerator , snake_case_ : int = 16 , snake_case_ : str = "bert-base-cased" ): __magic_name__ = AutoTokenizer.from_pretrained(snake_case_ ) __magic_name__ = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(snake_case_ : Union[str, Any] ): # max_length=None => use the model max length (it's actually the default) __magic_name__ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=snake_case_ , max_length=snake_case_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __magic_name__ = datasets.map( snake_case_ , batched=snake_case_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=snake_case_ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __magic_name__ = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(snake_case_ : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(snake_case_ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' ) return tokenizer.pad(snake_case_ , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. __magic_name__ = DataLoader( tokenized_datasets['''train'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ ) __magic_name__ = DataLoader( tokenized_datasets['''validation'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ ) return train_dataloader, eval_dataloader def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : str ): model.eval() __magic_name__ = 0 for step, batch in enumerate(snake_case_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): __magic_name__ = model(**snake_case_ ) __magic_name__ = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times __magic_name__ , __magic_name__ = accelerator.gather( (predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(snake_case_ ) - 1: __magic_name__ = predictions[: len(eval_dataloader.dataset ) - samples_seen] __magic_name__ = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=snake_case_ , references=snake_case_ , ) __magic_name__ = metric.compute() return eval_metric["accuracy"] def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Tuple ): # Initialize accelerator __magic_name__ = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __magic_name__ = config['''lr'''] __magic_name__ = int(config['''num_epochs'''] ) __magic_name__ = int(config['''seed'''] ) __magic_name__ = int(config['''batch_size'''] ) __magic_name__ = args.model_name_or_path set_seed(snake_case_ ) __magic_name__ , __magic_name__ = get_dataloaders(snake_case_ , snake_case_ , snake_case_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __magic_name__ = AutoModelForSequenceClassification.from_pretrained(snake_case_ , return_dict=snake_case_ ) # Instantiate optimizer __magic_name__ = ( AdamW if accelerator.state.deepspeed_plugin is None or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) __magic_name__ = optimizer_cls(params=model.parameters() , lr=snake_case_ ) if accelerator.state.deepspeed_plugin is not None: __magic_name__ = accelerator.state.deepspeed_plugin.deepspeed_config[ '''gradient_accumulation_steps''' ] else: __magic_name__ = 1 __magic_name__ = (len(snake_case_ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): __magic_name__ = get_linear_schedule_with_warmup( optimizer=snake_case_ , num_warmup_steps=0 , num_training_steps=snake_case_ , ) else: __magic_name__ = DummyScheduler(snake_case_ , total_num_steps=snake_case_ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = accelerator.prepare( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # We need to keep track of how many total steps we have iterated over __magic_name__ = 0 # We also need to keep track of the stating epoch so files are named properly __magic_name__ = 0 __magic_name__ = evaluate.load('''glue''' , '''mrpc''' ) __magic_name__ = num_epochs if args.partial_train_epoch is not None: __magic_name__ = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) __magic_name__ = args.resume_from_checkpoint.split('''epoch_''' )[1] __magic_name__ = '''''' for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break __magic_name__ = int(snake_case_ ) + 1 __magic_name__ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) accelerator.print('''resumed checkpoint performance:''' , snake_case_ ) accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] ) accelerator.print('''resumed optimizers\'s lr:''' , optimizer.param_groups[0]['''lr'''] ) with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , '''r''' ) as f: __magic_name__ = json.load(snake_case_ ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model __magic_name__ = {} for epoch in range(snake_case_ , snake_case_ ): model.train() for step, batch in enumerate(snake_case_ ): __magic_name__ = model(**snake_case_ ) __magic_name__ = outputs.loss __magic_name__ = loss / gradient_accumulation_steps accelerator.backward(snake_case_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 __magic_name__ = f'epoch_{epoch}' __magic_name__ = os.path.join(args.output_dir , snake_case_ ) accelerator.save_state(snake_case_ ) __magic_name__ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) __magic_name__ = accuracy __magic_name__ = lr_scheduler.get_lr()[0] __magic_name__ = optimizer.param_groups[0]['''lr'''] __magic_name__ = epoch __magic_name__ = overall_step accelerator.print(f'epoch {epoch}:' , snake_case_ ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , '''w''' ) as f: json.dump(snake_case_ , snake_case_ ) def _SCREAMING_SNAKE_CASE ( ): __magic_name__ = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' ) parser.add_argument( '''--model_name_or_path''' , type=snake_case_ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=snake_case_ , ) parser.add_argument( '''--output_dir''' , type=snake_case_ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. 
Default is the current working directory.''' , ) parser.add_argument( '''--resume_from_checkpoint''' , type=snake_case_ , default=snake_case_ , help='''If the training should continue from a checkpoint folder.''' , ) parser.add_argument( '''--partial_train_epoch''' , type=snake_case_ , default=snake_case_ , help='''If passed, the training will stop after this number of epochs.''' , ) parser.add_argument( '''--num_epochs''' , type=snake_case_ , default=2 , help='''Number of train epochs.''' , ) __magic_name__ = parser.parse_args() __magic_name__ = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16} training_function(snake_case_ , snake_case_ ) if __name__ == "__main__": main()
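# Note: each epoch writes both an accelerate checkpoint folder (`epoch_{n}`)
# and a small `state_{n}.json` carrying accuracy, learning rates, epoch and
# step counts; --resume_from_checkpoint reloads the folder, and the asserts
# above verify that the json metadata matches the restored optimizer and
# scheduler state.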
import argparse import fairseq import torch from torch import nn from transformers import ( MBartaaTokenizer, MBartConfig, MBartForCausalLM, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() lowerCamelCase = logging.get_logger(__name__) lowerCamelCase = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } lowerCamelCase = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def a_ ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ): '''simple docstring''' for attribute in key.split('.' ): _lowerCamelCase : Optional[Any] =getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if weight_type is not None: _lowerCamelCase : str =getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).shape else: _lowerCamelCase : Optional[Any] =hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": _lowerCamelCase : Tuple =value elif weight_type == "weight_g": _lowerCamelCase : Any =value elif weight_type == "weight_v": _lowerCamelCase : Any =value elif weight_type == "bias": _lowerCamelCase : Dict =value else: _lowerCamelCase : Union[str, Any] =value logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def a_ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] ): '''simple docstring''' _lowerCamelCase : Optional[int] =[] _lowerCamelCase : Optional[Any] =fairseq_model.state_dict() _lowerCamelCase : Tuple =hf_model.feature_extractor _lowerCamelCase : int =hf_model.adapter for name, value in fairseq_dict.items(): _lowerCamelCase : Optional[Any] =False if "conv_layers" in name: load_conv_layer( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hf_model.config.feat_extract_norm == 'group' , ) _lowerCamelCase : Optional[int] =True elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.'] ): load_adapter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) _lowerCamelCase : int =True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: _lowerCamelCase : Dict =True if "*" in mapped_key: _lowerCamelCase : List[str] =name.split(SCREAMING_SNAKE_CASE__ )[0].split('.' 
)[-2] _lowerCamelCase : Dict =mapped_key.replace('*' , SCREAMING_SNAKE_CASE__ ) if "weight_g" in name: _lowerCamelCase : List[Any] ='weight_g' elif "weight_v" in name: _lowerCamelCase : int ='weight_v' elif "bias" in name: _lowerCamelCase : int ='bias' elif "weight" in name: _lowerCamelCase : Optional[int] ='weight' else: _lowerCamelCase : Any =None set_recursively(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) continue if not is_used: unused_weights.append(SCREAMING_SNAKE_CASE__ ) logger.warning(F'''Unused weights: {unused_weights}''' ) def a_ ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any ): '''simple docstring''' _lowerCamelCase : str =full_name.split('conv_layers.' )[-1] _lowerCamelCase : List[Any] =name.split('.' ) _lowerCamelCase : Optional[int] =int(items[0] ) _lowerCamelCase : List[Any] =int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) _lowerCamelCase : Any =value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) _lowerCamelCase : int =value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." ) _lowerCamelCase : List[Any] =value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) _lowerCamelCase : Tuple =value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(SCREAMING_SNAKE_CASE__ ) def a_ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ): '''simple docstring''' _lowerCamelCase : Optional[int] =full_name.split('adaptor.' )[-1] _lowerCamelCase : str =name.split('.' 
) if items[1].isdigit(): _lowerCamelCase : Any =int(items[1] ) else: _lowerCamelCase : int =None if "adaptor" not in full_name: if "proj_ln" in full_name: # has to be layer norm if "bias" in name: assert ( value.shape == adapter.proj_layer_norm.bias.data.shape ), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.''' _lowerCamelCase : str =value logger.info(F'''Adapter proj layer norm bias was initialized from {full_name}.''' ) if "weight" in name: assert ( value.shape == adapter.proj_layer_norm.weight.data.shape ), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.''' _lowerCamelCase : str =value else: # has to be projection layer if "bias" in name: assert ( value.shape == adapter.proj.bias.data.shape ), F'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.''' _lowerCamelCase : Optional[Any] =value logger.info(F'''Adapter proj layer bias was initialized from {full_name}.''' ) if "weight" in name: assert ( value.shape == adapter.proj.weight.data.shape ), F'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.''' _lowerCamelCase : Optional[Any] =value logger.info(F'''Adapter proj layer weight was initialized from {full_name}.''' ) elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): if "bias" in name: assert ( value.shape == adapter.layers[layer_id].conv.bias.data.shape ), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.''' _lowerCamelCase : List[Any] =value logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' ) elif "weight" in name: assert ( value.shape == adapter.layers[layer_id].conv.weight.data.shape ), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.''' _lowerCamelCase : Dict =value logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' ) else: unused_weights.append(SCREAMING_SNAKE_CASE__ ) def a_ ( SCREAMING_SNAKE_CASE__ : Tuple ): '''simple docstring''' _lowerCamelCase , _lowerCamelCase : List[Any] =emb.weight.shape _lowerCamelCase : str =nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ ) _lowerCamelCase : Optional[Any] =emb.weight.data return lin_layer @torch.no_grad() def a_ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] , ): '''simple docstring''' _lowerCamelCase : int =WavaVecaConfig.from_pretrained( SCREAMING_SNAKE_CASE__ , add_adapter=SCREAMING_SNAKE_CASE__ , adapter_stride=SCREAMING_SNAKE_CASE__ , adapter_kernel_size=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , output_hidden_size=SCREAMING_SNAKE_CASE__ , ) _lowerCamelCase : str =MBartConfig.from_pretrained(SCREAMING_SNAKE_CASE__ ) # load model _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str =fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={ 'config_yaml': config_yaml_path, 'data': '/'.join(dict_path.split('/' )[:-1] ), 'w2v_path': checkpoint_path, 'load_pretrained_decoder_from': None, } , ) _lowerCamelCase : Any =model[0].eval() # load feature extractor _lowerCamelCase : 
List[Any] =WavaVecaFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ ) # set weights for wav2vec2 encoder _lowerCamelCase : List[Any] =WavaVecaModel(SCREAMING_SNAKE_CASE__ ) recursively_load_weights_wavaveca(model.encoder , SCREAMING_SNAKE_CASE__ ) # load decoder weights _lowerCamelCase : int =MBartForCausalLM(SCREAMING_SNAKE_CASE__ ) _lowerCamelCase , _lowerCamelCase : Union[str, Any] =hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=SCREAMING_SNAKE_CASE__ ) logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' ) logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' ) _lowerCamelCase : Dict =SpeechEncoderDecoderModel(encoder=SCREAMING_SNAKE_CASE__ , decoder=SCREAMING_SNAKE_CASE__ ) _lowerCamelCase : List[Any] =False _lowerCamelCase : Any =MBartaaTokenizer(SCREAMING_SNAKE_CASE__ ) tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ ) _lowerCamelCase : int =hf_wavavec.config.to_dict() _lowerCamelCase : Optional[int] =tokenizer.pad_token_id _lowerCamelCase : Tuple =tokenizer.bos_token_id _lowerCamelCase : Any =tokenizer.eos_token_id _lowerCamelCase : int ='mbart50' _lowerCamelCase : Tuple ='wav2vec2' _lowerCamelCase : Tuple =tokenizer.eos_token_id _lowerCamelCase : Dict =250_004 _lowerCamelCase : List[str] =tokenizer.eos_token_id _lowerCamelCase : Dict =SpeechEncoderDecoderConfig.from_dict(SCREAMING_SNAKE_CASE__ ) hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE__ ) feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": lowerCamelCase = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_yaml_path', default=None, type=str, help='Path to yaml file of fine-tuned model') parser.add_argument( '--encoder_config_path', default='facebook/wav2vec2-xls-r-1b', type=str, help='Path to hf encoder wav2vec2 checkpoint config', ) parser.add_argument( '--decoder_config_path', default='facebook/mbart-large-50-one-to-many-mmt', type=str, help='Path to hf decoder checkpoint config', ) parser.add_argument('--add_adapter', default=True, type=bool, help='whethere to add model adapter layers') parser.add_argument('--adapter_stride', default=2, type=int, help='stride of adapter layers') parser.add_argument('--adapter_kernel_size', default=3, type=int, help='kernel size of adapter layers') parser.add_argument('--encoder_output_dim', default=10_24, type=int, help='encoder output dim') parser.add_argument('--start_token_id', default=25_00_04, type=int, help='`decoder_start_token_id` of model config') lowerCamelCase = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, args.config_yaml_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, add_adapter=args.add_adapter, adapter_kernel_size=args.adapter_kernel_size, adapter_stride=args.adapter_stride, decoder_start_token_id=args.start_token_id, encoder_output_dim=args.encoder_output_dim, )
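# Invocation sketch for the conversion script above (not part of the original
# file; the script name and all paths are placeholders, and the flag names come
# from the argparse section of the script):
#
#   python convert_wav2vec2_mbart_checkpoint.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --pytorch_dump_folder_path ./converted-model \
#       --dict_path /path/to/dict \
#       --config_yaml_path /path/to/config.yaml \
#       --encoder_output_dim 1024 --start_token_id 250004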
464
from math import isqrt


def calculate_prime_numbers(max_number: int) -> list:
    """Return all primes below max_number using a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            # cross off multiples of i, stepping by i
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count composites below max_number with exactly two (not necessarily
    distinct) prime factors, via a two-pointer sweep over the primes."""
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count


if __name__ == "__main__":
    print(f"{solution() = }")
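# Quick sanity check for the functions above (a sketch, not part of the original
# solution): below 30 the semiprimes are 4, 6, 9, 10, 14, 15, 21, 22, 25, 26.
assert calculate_prime_numbers(10) == [2, 3, 5, 7]
assert solution(30) == 10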
464
1
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    """An IPv4 address is valid if it has exactly four dot-separated numeric
    octets, each in the range 0-255."""
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
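# A few illustrative checks (a sketch, not part of the original script):
assert is_ip_v4_address_valid("192.168.0.23") is True
assert is_ip_v4_address_valid("192.256.15.8") is False  # 256 is out of range
assert is_ip_v4_address_valid("255.123.1") is False  # only three octets
assert is_ip_v4_address_valid("1.2.-3.4") is False  # "-3" fails isdigit()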
706
import os import re import warnings from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer if TYPE_CHECKING: from ...tokenization_utils_base import TextInput from ...utils import logging UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = {'''vocab_file''': '''spiece.model'''} UpperCAmelCase = { '''vocab_file''': { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''', } } # TODO(PVP) - this should be removed in Transformers v5 UpperCAmelCase = { '''t5-small''': 512, '''t5-base''': 512, '''t5-large''': 512, '''t5-3b''': 512, '''t5-11b''': 512, } UpperCAmelCase = '''▁''' class A_ ( __lowerCamelCase ): '''simple docstring''' _UpperCamelCase : List[Any] = VOCAB_FILES_NAMES _UpperCamelCase : int = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase : List[str] = ["""input_ids""", """attention_mask"""] def __init__( self , snake_case , snake_case="</s>" , snake_case="<unk>" , snake_case="<pad>" , snake_case=100 , snake_case=None , snake_case = None , snake_case=True , **snake_case , ): # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: lowercase = [F'''<extra_id_{i}>''' for i in range(snake_case )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens lowercase = len(set(filter(lambda snake_case : bool('extra_id' in str(snake_case ) ) , snake_case ) ) ) if extra_tokens != extra_ids: raise ValueError( F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are''' ' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids' ' tokens' ) if legacy: logger.warning_once( F'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to''' ' read the related pull request available at https://github.com/huggingface/transformers/pull/24565' ) lowercase = legacy lowercase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=snake_case , unk_token=snake_case , pad_token=snake_case , extra_ids=snake_case , additional_special_tokens=snake_case , sp_model_kwargs=self.sp_model_kwargs , legacy=snake_case , **snake_case , ) lowercase = vocab_file lowercase = extra_ids lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(snake_case ) @staticmethod def SCREAMING_SNAKE_CASE__ ( snake_case , snake_case , snake_case ): if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes: lowercase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( 'This tokenizer was incorrectly instantiated with a model max length of' F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this''' ' behavior is kept to avoid breaking backwards compatibility when padding/encoding with' ' `truncation is True`.\n- Be aware that you SHOULD NOT rely on' F''' {pretrained_model_name_or_path} automatically truncating your input to''' F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences''' F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with''' ' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please' ' instantiate this tokenizer with `model_max_length` set to your preferred value.' , snake_case , ) return max_model_length @property def SCREAMING_SNAKE_CASE__ ( self ): return self.sp_model.get_piece_size() + self._extra_ids def SCREAMING_SNAKE_CASE__ ( self ): lowercase = {self.convert_ids_to_tokens(snake_case ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None , snake_case = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(snake_case )) + [1] return ([0] * len(snake_case )) + [1] + ([0] * len(snake_case )) + [1] def SCREAMING_SNAKE_CASE__ ( self ): return list( set(filter(lambda snake_case : bool(re.search(r'<extra_id_\d+>' , snake_case ) ) is not None , self.additional_special_tokens ) ) ) def SCREAMING_SNAKE_CASE__ ( self ): return [self._convert_token_to_id(snake_case ) for token in self.get_sentinel_tokens()] def SCREAMING_SNAKE_CASE__ ( self , snake_case ): if len(snake_case ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated''' ' eos tokens being added.' 
) return token_ids else: return token_ids + [self.eos_token_id] def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ): lowercase = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ): lowercase = self._add_eos_if_not_present(snake_case ) if token_ids_a is None: return token_ids_a else: lowercase = self._add_eos_if_not_present(snake_case ) return token_ids_a + token_ids_a def __getstate__( self ): lowercase = self.__dict__.copy() lowercase = None return state def __setstate__( self , snake_case ): lowercase = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): lowercase = {} lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , **snake_case ): # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at # the beginning of the text if not self.legacy: lowercase = SPIECE_UNDERLINE + text.replace(snake_case , ' ' ) return super().tokenize(snake_case , **snake_case ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , **snake_case ): if not self.legacy: lowercase = text.startswith(snake_case ) if is_first: lowercase = text[1:] lowercase = self.sp_model.encode(snake_case , out_type=snake_case ) if not self.legacy and not is_first and not text.startswith(' ' ) and tokens[0].startswith(snake_case ): lowercase = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:] return tokens def SCREAMING_SNAKE_CASE__ ( self , snake_case ): if token.startswith('<extra_id_' ): lowercase = re.match(r'<extra_id_(\d+)>' , snake_case ) lowercase = int(match.group(1 ) ) return self.vocab_size - num - 1 return self.sp_model.piece_to_id(snake_case ) def SCREAMING_SNAKE_CASE__ ( self , snake_case ): if index < self.sp_model.get_piece_size(): lowercase = self.sp_model.IdToPiece(snake_case ) else: lowercase = F'''<extra_id_{self.vocab_size - 1 - index}>''' return token def SCREAMING_SNAKE_CASE__ ( self , snake_case ): lowercase = [] lowercase = '' lowercase = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(snake_case ) + token lowercase = True lowercase = [] else: current_sub_tokens.append(snake_case ) lowercase = False out_string += self.sp_model.decode(snake_case ) return out_string.strip() def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ): if not os.path.isdir(snake_case ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowercase = os.path.join( snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , snake_case ) elif not os.path.isfile(self.vocab_file ): with open(snake_case , 'wb' ) as fi: lowercase = self.sp_model.serialized_model_proto() fi.write(snake_case ) return (out_vocab_file,)
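# Usage sketch for the tokenizer above (it mirrors transformers' published
# T5Tokenizer; the example assumes that class and a downloadable t5-small
# checkpoint, so treat it as an illustration rather than part of the file):
from transformers import T5Tokenizer

tok = T5Tokenizer.from_pretrained("t5-small")
ids = tok("translate English to German: hello").input_ids
# build_inputs_with_special_tokens appends </s> automatically:
assert ids[-1] == tok.eos_token_id
# the 100 sentinel tokens <extra_id_0> ... <extra_id_99> sit at the top of the
# vocabulary, per _convert_token_to_id above:
assert tok.convert_tokens_to_ids("<extra_id_0>") == tok.vocab_size - 1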
565
0
import json from typing import List, Optional, Tuple from tokenizers import normalizers from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roformer import RoFormerTokenizer from .tokenization_utils import JiebaPreTokenizer SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} SCREAMING_SNAKE_CASE__ = { """vocab_file""": { """junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""", """junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""", """junnyu/roformer_chinese_char_small""": ( """https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt""" ), """junnyu/roformer_chinese_char_base""": ( """https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt""" ), """junnyu/roformer_small_discriminator""": ( """https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt""" ), """junnyu/roformer_small_generator""": ( """https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt""" ), } } SCREAMING_SNAKE_CASE__ = { """junnyu/roformer_chinese_small""": 1536, """junnyu/roformer_chinese_base""": 1536, """junnyu/roformer_chinese_char_small""": 512, """junnyu/roformer_chinese_char_base""": 512, """junnyu/roformer_small_discriminator""": 128, """junnyu/roformer_small_generator""": 128, } SCREAMING_SNAKE_CASE__ = { """junnyu/roformer_chinese_small""": {"""do_lower_case""": True}, """junnyu/roformer_chinese_base""": {"""do_lower_case""": True}, """junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True}, """junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True}, """junnyu/roformer_small_discriminator""": {"""do_lower_case""": True}, """junnyu/roformer_small_generator""": {"""do_lower_case""": True}, } class _UpperCamelCase( __lowerCamelCase ): __SCREAMING_SNAKE_CASE : int = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_INIT_CONFIGURATION __SCREAMING_SNAKE_CASE : Optional[Any] = RoFormerTokenizer def __init__( self : str , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : Dict="[UNK]" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="[SEP]" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="[PAD]" , SCREAMING_SNAKE_CASE__ : Tuple="[CLS]" , SCREAMING_SNAKE_CASE__ : List[str]="[MASK]" , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Optional[int]=None , **SCREAMING_SNAKE_CASE__ : Dict , ): '''simple docstring''' super().__init__( SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) __a : List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( pre_tok_state.get('lowercase' , SCREAMING_SNAKE_CASE__ ) != do_lower_case or pre_tok_state.get('strip_accents' , 
SCREAMING_SNAKE_CASE__ ) != strip_accents ): __a : List[str] = getattr(SCREAMING_SNAKE_CASE__ , pre_tok_state.pop('type' ) ) __a : Optional[Any] = do_lower_case __a : List[str] = strip_accents __a : Union[str, Any] = pre_tok_class(**SCREAMING_SNAKE_CASE__ ) __a : Dict = do_lower_case def __getstate__( self : Optional[int] ): '''simple docstring''' __a : Optional[Any] = self.__dict__.copy() __a : Optional[int] = BertPreTokenizer() return state def __setstate__( self : Any , SCREAMING_SNAKE_CASE__ : Dict ): '''simple docstring''' __a : Tuple = d __a : Any = self.__dict__['_tokenizer'].get_vocab() __a : str = PreTokenizer.custom(JiebaPreTokenizer(SCREAMING_SNAKE_CASE__ ) ) def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple=None ): '''simple docstring''' __a : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] = None ): '''simple docstring''' __a : List[str] = [self.sep_token_id] __a : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : int = None ): '''simple docstring''' __a : Union[str, Any] = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ ) return tuple(SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Any=False , **SCREAMING_SNAKE_CASE__ : List[Any] , ): '''simple docstring''' __a : int = BertPreTokenizer() return super().save_pretrained(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
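# Usage sketch for the fast tokenizer above (it mirrors transformers'
# RoFormerTokenizerFast; requires the rjieba package and network access, so
# treat this as an illustration rather than part of the original file):
from transformers import RoFormerTokenizerFast

tok = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
# The custom Jieba pre-tokenizer segments Chinese text into words before
# WordPiece, e.g. ['今', '天', '天', '气', '非常', '好', '。']:
print(tok.tokenize("今天天气非常好。"))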
47
'''simple docstring''' import os import sys import unittest a_ : Optional[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path a_ : Tuple = os.path.join(git_repo_path, """src""", """transformers""") a_ : List[Any] = """ {0} = None """ a_ : Optional[Any] = """ class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) """ a_ : str = """ def {0}(*args, **kwargs): requires_backends({0}, {1}) """ class snake_case ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): """simple docstring""" lowerCamelCase_ = find_backend(" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")" ) self.assertIsNone(UpperCamelCase ) lowerCamelCase_ = find_backend(" if not is_tokenizers_available():" ) self.assertEqual(UpperCamelCase , "tokenizers" ) lowerCamelCase_ = find_backend(" if not is_tensorflow_text_available():" ) self.assertEqual(UpperCamelCase , "tensorflow_text" ) lowerCamelCase_ = find_backend(" if not (is_sentencepiece_available() and is_tokenizers_available()):" ) self.assertEqual(UpperCamelCase , "sentencepiece_and_tokenizers" ) lowerCamelCase_ = find_backend( " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" ) self.assertEqual(UpperCamelCase , "sentencepiece_and_tensorflow_text" ) lowerCamelCase_ = find_backend( " if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):" ) self.assertEqual(UpperCamelCase , "sentencepiece_and_tokenizers_and_vision" ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("torch" , UpperCamelCase ) self.assertIn("tensorflow_text" , UpperCamelCase ) self.assertIn("sentencepiece_and_tokenizers" , UpperCamelCase ) # Likewise, we can't assert on the exact content of a key self.assertIn("BertModel" , objects["torch"] ) self.assertIn("TFBertModel" , objects["tf"] ) self.assertIn("FlaxBertModel" , objects["flax"] ) self.assertIn("BertModel" , objects["torch"] ) self.assertIn("TFBertTokenizer" , objects["tensorflow_text"] ) self.assertIn("convert_slow_tokenizer" , objects["sentencepiece_and_tokenizers"] ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = create_dummy_object("CONSTANT" , "'torch'" ) self.assertEqual(UpperCamelCase , "\nCONSTANT = None\n" ) lowerCamelCase_ = create_dummy_object("function" , "'torch'" ) self.assertEqual( UpperCamelCase , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" ) lowerCamelCase_ = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n" lowerCamelCase_ = create_dummy_object("FakeClass" , "'torch'" ) self.assertEqual(UpperCamelCase , UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, 
**kwargs):\n requires_backends(self, [\"torch\"])\n" lowerCamelCase_ = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} ) self.assertEqual(dummy_files["torch"] , UpperCamelCase )
675
0
"""simple docstring""" import argparse import math import traceback import dateutil.parser as date_parser import requests def _lowerCAmelCase ( lowerCamelCase__ : int ) -> Dict: _SCREAMING_SNAKE_CASE : str = {} _SCREAMING_SNAKE_CASE : List[str] = job["started_at"] _SCREAMING_SNAKE_CASE : Optional[int] = job["completed_at"] _SCREAMING_SNAKE_CASE : Any = date_parser.parse(__UpperCamelCase ) _SCREAMING_SNAKE_CASE : str = date_parser.parse(__UpperCamelCase ) _SCREAMING_SNAKE_CASE : Union[str, Any] = round((end_datetime - start_datetime).total_seconds() / 60.0 ) _SCREAMING_SNAKE_CASE : Optional[int] = start _SCREAMING_SNAKE_CASE : List[Any] = end _SCREAMING_SNAKE_CASE : Dict = duration_in_min return job_info def _lowerCAmelCase ( lowerCamelCase__ : Dict, lowerCamelCase__ : Optional[Any]=None ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE : Optional[Any] = None if token is not None: _SCREAMING_SNAKE_CASE : Dict = {"Accept": "application/vnd.github+json", "Authorization": f'''Bearer {token}'''} _SCREAMING_SNAKE_CASE : str = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100''' _SCREAMING_SNAKE_CASE : str = requests.get(__UpperCamelCase, headers=__UpperCamelCase ).json() _SCREAMING_SNAKE_CASE : Any = {} try: job_time.update({job["name"]: extract_time_from_single_job(__UpperCamelCase ) for job in result["jobs"]} ) _SCREAMING_SNAKE_CASE : Any = math.ceil((result["total_count"] - 1_0_0) / 1_0_0 ) for i in range(__UpperCamelCase ): _SCREAMING_SNAKE_CASE : Dict = requests.get(url + f'''&page={i + 2}''', headers=__UpperCamelCase ).json() job_time.update({job["name"]: extract_time_from_single_job(__UpperCamelCase ) for job in result["jobs"]} ) return job_time except Exception: print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' ) return {} if __name__ == "__main__": lowercase_ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''') lowercase_ : str = parser.parse_args() lowercase_ : int = get_job_time(args.workflow_run_id) lowercase_ : Any = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True)) for k, v in job_time.items(): print(F'{k}: {v["duration"]}')
713
"""simple docstring""" from typing import List from .keymap import KEYMAP, get_character def _lowerCAmelCase ( lowerCamelCase__ : str ) -> Optional[int]: def decorator(lowerCamelCase__ : int ): _SCREAMING_SNAKE_CASE : Optional[int] = getattr(lowerCamelCase__, "handle_key", [] ) handle += [key] setattr(lowerCamelCase__, "handle_key", lowerCamelCase__ ) return func return decorator def _lowerCAmelCase ( *lowerCamelCase__ : List[str] ) -> Tuple: def decorator(lowerCamelCase__ : Dict ): _SCREAMING_SNAKE_CASE : List[Any] = getattr(lowerCamelCase__, "handle_key", [] ) handle += keys setattr(lowerCamelCase__, "handle_key", lowerCamelCase__ ) return func return decorator class UpperCamelCase ( __SCREAMING_SNAKE_CASE ): def __new__( cls , snake_case__ , snake_case__ , snake_case__ ): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = super().__new__(cls , snake_case__ , snake_case__ , snake_case__ ) if not hasattr(snake_case__ , "key_handler" ): setattr(snake_case__ , "key_handler" , {} ) setattr(snake_case__ , "handle_input" , KeyHandler.handle_input ) for value in attrs.values(): _SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(snake_case__ , "handle_key" , [] ) for key in handled_keys: _SCREAMING_SNAKE_CASE : Tuple = value return new_cls @staticmethod def __SCREAMING_SNAKE_CASE ( cls ): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = get_character() if char != KEYMAP["undefined"]: _SCREAMING_SNAKE_CASE : Dict = ord(snake_case__ ) _SCREAMING_SNAKE_CASE : Union[str, Any] = cls.key_handler.get(snake_case__ ) if handler: _SCREAMING_SNAKE_CASE : Optional[int] = char return handler(cls ) else: return None def _lowerCAmelCase ( cls : List[Any] ) -> str: return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy() )
295
0
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
44
'''simple docstring''' import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__) def A_ ( _lowerCAmelCase : str ): """simple docstring""" _lowerCamelCase : Union[str, Any] = SwinConfig.from_pretrained( "microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] ) _lowerCamelCase : Dict = MaskFormerConfig(backbone_config=_lowerCAmelCase ) _lowerCamelCase : Tuple = "huggingface/label-files" if "ade20k-full" in model_name: # this should be ok _lowerCamelCase : List[Any] = 847 _lowerCamelCase : str = "maskformer-ade20k-full-id2label.json" elif "ade" in model_name: # this should be ok _lowerCamelCase : Optional[int] = 150 _lowerCamelCase : Union[str, Any] = "ade20k-id2label.json" elif "coco-stuff" in model_name: # this should be ok _lowerCamelCase : Union[str, Any] = 171 _lowerCamelCase : str = "maskformer-coco-stuff-id2label.json" elif "coco" in model_name: # TODO _lowerCamelCase : Optional[int] = 133 _lowerCamelCase : Any = "coco-panoptic-id2label.json" elif "cityscapes" in model_name: # this should be ok _lowerCamelCase : str = 19 _lowerCamelCase : Tuple = "cityscapes-id2label.json" elif "vistas" in model_name: # this should be ok _lowerCamelCase : List[Any] = 65 _lowerCamelCase : Optional[int] = "mapillary-vistas-id2label.json" _lowerCamelCase : Any = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) ) _lowerCamelCase : Optional[int] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()} return config def A_ ( _lowerCAmelCase : Tuple ): """simple docstring""" _lowerCamelCase : Any = [] # stem # fmt: off rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") ) rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") ) rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") ) rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') ) 
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') ) rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') ) if i < 3: rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') ) rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') ) rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') ) rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') ) rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') ) # FPN rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") ) rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") ) rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") ) for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') ) rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') ) rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') ) rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') ) rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') ) rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') ) rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") ) rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") ) # Transformer decoder for idx in range(config.decoder_config.decoder_layers ): # self-attention out projection 
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') ) # cross-attention out projection rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') ) # MLP 1 rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') ) # MLP 2 rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') ) # layernorm 1 (self-attention layernorm) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') ) # layernorm 2 (cross-attention layernorm) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') ) # layernorm 3 (final layernorm) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') ) rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') ) rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") ) rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") ) # heads on top rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") ) rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") ) rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") ) rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") ) rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") ) for i in range(3 ): rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') ) 
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') ) # fmt: on return rename_keys def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] ): """simple docstring""" _lowerCamelCase : Tuple = dct.pop(_lowerCAmelCase ) _lowerCamelCase : str = val def A_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any ): """simple docstring""" _lowerCamelCase : str = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): _lowerCamelCase : int = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) _lowerCamelCase : Union[str, Any] = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' ) _lowerCamelCase : List[str] = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict _lowerCamelCase : Optional[int] = in_proj_weight[:dim, :] _lowerCamelCase : Optional[int] = in_proj_bias[: dim] _lowerCamelCase : List[str] = in_proj_weight[ dim : dim * 2, : ] _lowerCamelCase : List[Any] = in_proj_bias[ dim : dim * 2 ] _lowerCamelCase : List[Any] = in_proj_weight[ -dim :, : ] _lowerCamelCase : Union[str, Any] = in_proj_bias[-dim :] # fmt: on def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Any ): """simple docstring""" _lowerCamelCase : int = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) _lowerCamelCase : Tuple = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' ) _lowerCamelCase : Optional[int] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict _lowerCamelCase : Optional[Any] = in_proj_weight[: hidden_size, :] _lowerCamelCase : Optional[int] = in_proj_bias[:config.hidden_size] _lowerCamelCase : str = in_proj_weight[hidden_size : hidden_size * 2, :] _lowerCamelCase : Dict = in_proj_bias[hidden_size : hidden_size * 2] _lowerCamelCase : Any = in_proj_weight[-hidden_size :, :] _lowerCamelCase : Any = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) _lowerCamelCase : Optional[int] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' ) _lowerCamelCase : List[Any] = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict _lowerCamelCase : Tuple = in_proj_weight[: hidden_size, :] _lowerCamelCase : str = in_proj_bias[:config.hidden_size] _lowerCamelCase : str = in_proj_weight[hidden_size : hidden_size * 2, :] _lowerCamelCase : Optional[int] = in_proj_bias[hidden_size : hidden_size * 2] _lowerCamelCase : int = in_proj_weight[-hidden_size :, :] _lowerCamelCase : Optional[Any] = in_proj_bias[-hidden_size :] # fmt: on def A_ ( ): """simple docstring""" _lowerCamelCase : List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" _lowerCamelCase : Optional[Any] = Image.open(requests.get(_lowerCAmelCase , 
stream=_lowerCAmelCase ).raw ) return im @torch.no_grad() def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : bool = False ): """simple docstring""" _lowerCamelCase : Tuple = get_maskformer_config(_lowerCAmelCase ) # load original state_dict with open(_lowerCAmelCase , "rb" ) as f: _lowerCamelCase : List[Any] = pickle.load(_lowerCAmelCase ) _lowerCamelCase : Optional[Any] = data["model"] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys _lowerCamelCase : List[Any] = create_rename_keys(_lowerCAmelCase ) for src, dest in rename_keys: rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) read_in_swin_q_k_v(_lowerCAmelCase , config.backbone_config ) read_in_decoder_q_k_v(_lowerCAmelCase , _lowerCAmelCase ) # update to torch tensors for key, value in state_dict.items(): _lowerCamelCase : Dict = torch.from_numpy(_lowerCAmelCase ) # load 🤗 model _lowerCamelCase : int = MaskFormerForInstanceSegmentation(_lowerCAmelCase ) model.eval() for name, param in model.named_parameters(): print(_lowerCAmelCase , param.shape ) _lowerCamelCase , _lowerCamelCase : Union[str, Any] = model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(_lowerCAmelCase ) == 0, F'Unexpected keys: {unexpected_keys}' # verify results _lowerCamelCase : Any = prepare_img() if "vistas" in model_name: _lowerCamelCase : Any = 65 elif "cityscapes" in model_name: _lowerCamelCase : Optional[Any] = 65535 else: _lowerCamelCase : str = 255 _lowerCamelCase : List[str] = True if "ade" in model_name else False _lowerCamelCase : Union[str, Any] = MaskFormerImageProcessor(ignore_index=_lowerCAmelCase , reduce_labels=_lowerCAmelCase ) _lowerCamelCase : int = image_processor(_lowerCAmelCase , return_tensors="pt" ) _lowerCamelCase : Tuple = model(**_lowerCAmelCase ) print("Logits:" , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": _lowerCamelCase : Tuple = torch.tensor( [[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCAmelCase , atol=1E-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: print(F'Saving model and image processor to {pytorch_dump_folder_path}' ) Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase ) model.save_pretrained(_lowerCAmelCase ) image_processor.save_pretrained(_lowerCAmelCase ) if push_to_hub: print("Pushing model and image processor to the hub..." ) model.push_to_hub(F'nielsr/{model_name}' ) image_processor.push_to_hub(F'nielsr/{model_name}' ) if __name__ == "__main__": UpperCAmelCase_ : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='maskformer-swin-tiny-ade', type=str, help=('Name of the MaskFormer model you\'d like to convert',), ) parser.add_argument( '--checkpoint_path', default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl', type=str, help='Path to the original state dict (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' 
) UpperCAmelCase_ : int = parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
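# Invocation sketch for the conversion script above (not part of the original
# file; the script name and paths are placeholders):
#
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-converted \
#       --push_to_hub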
44
1
"""simple docstring""" UpperCamelCase : Optional[int] = 8.31_44_62 # Unit - J mol-1 K-1 def A ( snake_case :float , snake_case :float , snake_case :float ) -> float: if moles < 0 or kelvin < 0 or volume < 0: raise ValueError('Invalid inputs. Enter positive value.' ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume def A ( snake_case :float , snake_case :float , snake_case :float ) -> float: if moles < 0 or kelvin < 0 or pressure < 0: raise ValueError('Invalid inputs. Enter positive value.' ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure if __name__ == "__main__": from doctest import testmod testmod()
293
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase : List[str] = { "configuration_autoformer": [ "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "AutoformerConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Dict = [ "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "AutoformerForPrediction", "AutoformerModel", "AutoformerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_autoformer import ( AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_autoformer import ( AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, AutoformerForPrediction, AutoformerModel, AutoformerPreTrainedModel, ) else: import sys UpperCamelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
293
1
# Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def lowerCamelCase__ ( _a , _a , _a): SCREAMING_SNAKE_CASE : Optional[Any] = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } # BLUE scores as follows: # "pair": [fairseq, transformers] SCREAMING_SNAKE_CASE : str = { "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"], "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"], "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"], "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"], } SCREAMING_SNAKE_CASE : str = f"{src_lang}-{tgt_lang}" SCREAMING_SNAKE_CASE : Tuple = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. 
For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n" os.makedirs(_a , exist_ok=_a) SCREAMING_SNAKE_CASE : Any = os.path.join(_a , "README.md") print(f"Generating {path}") with open(_a , "w" , encoding="utf-8") as f: f.write(_a) # make sure we are under the root of the project a_ = Path(__file__).resolve().parent.parent.parent a_ = repo_dir / 'model_cards' for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: a_ , a_ , a_ = model_name.split('-') a_ = model_cards_dir / 'facebook' / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
25
from datetime import datetime

import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
544
0
"""simple docstring""" import unittest from transformers.utils.backbone_utils import ( BackboneMixin, get_aligned_output_features_output_indices, verify_out_features_out_indices, ) class snake_case ( unittest.TestCase): def a_ ( self : int ) -> Dict: '''simple docstring''' _A = ["a", "b", "c"] # Defaults to last layer if both are None _A , _A = get_aligned_output_features_output_indices(a__ , a__ , a__ ) self.assertEqual(a__ , ["c"] ) self.assertEqual(a__ , [2] ) # Out indices set to match out features _A , _A = get_aligned_output_features_output_indices(["a", "c"] , a__ , a__ ) self.assertEqual(a__ , ["a", "c"] ) self.assertEqual(a__ , [0, 2] ) # Out features set to match out indices _A , _A = get_aligned_output_features_output_indices(a__ , [0, 2] , a__ ) self.assertEqual(a__ , ["a", "c"] ) self.assertEqual(a__ , [0, 2] ) # Out features selected from negative indices _A , _A = get_aligned_output_features_output_indices(a__ , [-3, -1] , a__ ) self.assertEqual(a__ , ["a", "c"] ) self.assertEqual(a__ , [-3, -1] ) def a_ ( self : List[Any] ) -> List[str]: '''simple docstring''' with self.assertRaises(a__ ): verify_out_features_out_indices(["a", "b"] , (0, 1) , a__ ) # Out features must be a list with self.assertRaises(a__ ): verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"] ) # Out features must be a subset of stage names with self.assertRaises(a__ ): verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"] ) # Out indices must be a list or tuple with self.assertRaises(a__ ): verify_out_features_out_indices(a__ , 0 , ["a", "b"] ) # Out indices must be a subset of stage names with self.assertRaises(a__ ): verify_out_features_out_indices(a__ , (0, 1) , ["a"] ) # Out features and out indices must be the same length with self.assertRaises(a__ ): verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"] ) # Out features should match out indices with self.assertRaises(a__ ): verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"] ) # Out features and out indices should be in order with self.assertRaises(a__ ): verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"] ) # Check passes with valid inputs verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"] ) def a_ ( self : Union[str, Any] ) -> Tuple: '''simple docstring''' _A = BackboneMixin() _A = ["a", "b", "c"] _A = ["a", "c"] _A = [0, 2] # Check that the output features and indices are set correctly self.assertEqual(backbone.out_features , ["a", "c"] ) self.assertEqual(backbone.out_indices , [0, 2] ) # Check out features and indices are updated correctly _A = ["a", "b"] self.assertEqual(backbone.out_features , ["a", "b"] ) self.assertEqual(backbone.out_indices , [0, 1] ) _A = [-3, -1] self.assertEqual(backbone.out_features , ["a", "c"] ) self.assertEqual(backbone.out_indices , [-3, -1] )
621
"""simple docstring""" import numpy as np def a__ ( __lowercase , __lowercase ) -> np.ndarray: return np.where(vector > 0 , __lowercase , (alpha * (np.exp(__lowercase ) - 1)) ) if __name__ == "__main__": import doctest doctest.testmod()
621
1
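A minimal sketch of the index-alignment behaviour the backbone test above exercises: negative `out_indices` select stage names from the end, so `[-3, -1]` on a three-stage backbone yields `["a", "c"]`. The helper below is an illustrative reimplementation, not the library function:

```python
# Illustrative only: mirrors how out_indices (including negative ones) map onto stage names.
def align_out_features(stage_names: list[str], out_indices: list[int]) -> list[str]:
    return [stage_names[i] for i in out_indices]

assert align_out_features(["a", "b", "c"], [-3, -1]) == ["a", "c"]
assert align_out_features(["a", "b", "c"], [0, 2]) == ["a", "c"]
```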
"""simple docstring""" from math import factorial class snake_case_ : """simple docstring""" def __init__( self , lowerCamelCase_ , lowerCamelCase_) -> str: UpperCamelCase = real if isinstance(lowerCamelCase_ , lowerCamelCase_): UpperCamelCase = [1] * rank else: UpperCamelCase = rank def __repr__( self) -> Any: return ( F'{self.real}+' F'{"+".join(str(lowerCamelCase_)+"E"+str(n+1)for n,dual in enumerate(self.duals))}' ) def UpperCAmelCase__ ( self) -> Optional[Any]: UpperCamelCase = self.duals.copy() while cur[-1] == 0: cur.pop(-1) return Dual(self.real , lowerCamelCase_) def __add__( self , lowerCamelCase_) -> List[str]: if not isinstance(lowerCamelCase_ , lowerCamelCase_): return Dual(self.real + other , self.duals) UpperCamelCase = self.duals.copy() UpperCamelCase = other.duals.copy() if len(lowerCamelCase_) > len(lowerCamelCase_): o_dual.extend([1] * (len(lowerCamelCase_) - len(lowerCamelCase_))) elif len(lowerCamelCase_) < len(lowerCamelCase_): s_dual.extend([1] * (len(lowerCamelCase_) - len(lowerCamelCase_))) UpperCamelCase = [] for i in range(len(lowerCamelCase_)): new_duals.append(s_dual[i] + o_dual[i]) return Dual(self.real + other.real , lowerCamelCase_) A_ = __add__ def __sub__( self , lowerCamelCase_) -> str: return self + other * -1 def __mul__( self , lowerCamelCase_) -> Union[str, Any]: if not isinstance(lowerCamelCase_ , lowerCamelCase_): UpperCamelCase = [] for i in self.duals: new_duals.append(i * other) return Dual(self.real * other , lowerCamelCase_) UpperCamelCase = [0] * (len(self.duals) + len(other.duals) + 1) for i, item in enumerate(self.duals): for j, jtem in enumerate(other.duals): new_duals[i + j + 1] += item * jtem for k in range(len(self.duals)): new_duals[k] += self.duals[k] * other.real for index in range(len(other.duals)): new_duals[index] += other.duals[index] * self.real return Dual(self.real * other.real , lowerCamelCase_) A_ = __mul__ def __truediv__( self , lowerCamelCase_) -> List[str]: if not isinstance(lowerCamelCase_ , lowerCamelCase_): UpperCamelCase = [] for i in self.duals: new_duals.append(i / other) return Dual(self.real / other , lowerCamelCase_) raise ValueError def __floordiv__( self , lowerCamelCase_) -> Optional[Any]: if not isinstance(lowerCamelCase_ , lowerCamelCase_): UpperCamelCase = [] for i in self.duals: new_duals.append(i // other) return Dual(self.real // other , lowerCamelCase_) raise ValueError def __pow__( self , lowerCamelCase_) -> str: if n < 0 or isinstance(lowerCamelCase_ , lowerCamelCase_): raise ValueError('''power must be a positive integer''') if n == 0: return 1 if n == 1: return self UpperCamelCase = self for _ in range(n - 1): x *= self return x def __snake_case ( _lowercase ,_lowercase ,_lowercase ): """simple docstring""" if not callable(_lowercase ): raise ValueError('''differentiate() requires a function as input for func''' ) if not isinstance(_lowercase ,(float, int) ): raise ValueError('''differentiate() requires a float as input for position''' ) if not isinstance(_lowercase ,_lowercase ): raise ValueError('''differentiate() requires an int as input for order''' ) UpperCamelCase = Dual(_lowercase ,1 ) UpperCamelCase = func(_lowercase ) if order == 0: return result.real return result.duals[order - 1] * factorial(_lowercase ) if __name__ == "__main__": import doctest doctest.testmod() def __snake_case ( _lowercase ): """simple docstring""" return y**2 * y**4 print(differentiate(f, 9, 2))
34
import numpy as np import torch from torch.utils.data import Dataset from utils import logger class _lowerCAmelCase ( lowercase_ ): """simple docstring""" def __init__( self : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]): '''simple docstring''' snake_case__ = params snake_case__ = np.array(UpperCamelCase__) snake_case__ = np.array([len(UpperCamelCase__) for t in data]) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self : Dict , UpperCamelCase__ : Any): '''simple docstring''' return (self.token_ids[index], self.lengths[index]) def __len__( self : Union[str, Any]): '''simple docstring''' return len(self.lengths) def __magic_name__ ( self : str): '''simple docstring''' assert len(self.token_ids) == len(self.lengths) assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths))) def __magic_name__ ( self : Optional[int]): '''simple docstring''' snake_case__ = self.params.max_model_input_size snake_case__ = self.lengths > max_len logger.info(F'''Splitting {sum(UpperCamelCase__)} too long sequences.''') def divide_chunks(UpperCamelCase__ : str , UpperCamelCase__ : Tuple): return [l[i : i + n] for i in range(0 , len(UpperCamelCase__) , UpperCamelCase__)] snake_case__ = [] snake_case__ = [] if self.params.mlm: snake_case__ , snake_case__ = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""] else: snake_case__ , snake_case__ = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""] for seq_, len_ in zip(self.token_ids , self.lengths): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_) new_lengths.append(len_) else: snake_case__ = [] for sub_s in divide_chunks(seq_ , max_len - 2): if sub_s[0] != cls_id: snake_case__ = np.insert(UpperCamelCase__ , 0 , UpperCamelCase__) if sub_s[-1] != sep_id: snake_case__ = np.insert(UpperCamelCase__ , len(UpperCamelCase__) , UpperCamelCase__) assert len(UpperCamelCase__) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(UpperCamelCase__) new_tok_ids.extend(UpperCamelCase__) new_lengths.extend([len(UpperCamelCase__) for l in sub_seqs]) snake_case__ = np.array(UpperCamelCase__) snake_case__ = np.array(UpperCamelCase__) def __magic_name__ ( self : Any): '''simple docstring''' snake_case__ = len(self) snake_case__ = self.lengths > 1_1 snake_case__ = self.token_ids[indices] snake_case__ = self.lengths[indices] snake_case__ = len(self) logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''') def __magic_name__ ( self : List[str]): '''simple docstring''' if "unk_token" not in self.params.special_tok_ids: return else: snake_case__ = self.params.special_tok_ids["""unk_token"""] snake_case__ = len(self) snake_case__ = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids]) snake_case__ = (unk_occs / self.lengths) < 0.5 snake_case__ = self.token_ids[indices] snake_case__ = self.lengths[indices] snake_case__ = len(self) logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''') def __magic_name__ ( self : Optional[Any]): '''simple docstring''' if not self.params.is_master: return logger.info(F'''{len(self)} sequences''') # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} 
unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def __magic_name__ ( self : int , UpperCamelCase__ : Optional[int]): '''simple docstring''' snake_case__ = [t[0] for t in batch] snake_case__ = [t[1] for t in batch] assert len(UpperCamelCase__) == len(UpperCamelCase__) # Max for paddings snake_case__ = max(UpperCamelCase__) # Pad token ids if self.params.mlm: snake_case__ = self.params.special_tok_ids["""pad_token"""] else: snake_case__ = self.params.special_tok_ids["""unk_token"""] snake_case__ = [list(t.astype(UpperCamelCase__)) + [pad_idx] * (max_seq_len_ - len(UpperCamelCase__)) for t in token_ids] assert len(tk_) == len(UpperCamelCase__) assert all(len(UpperCamelCase__) == max_seq_len_ for t in tk_) snake_case__ = torch.tensor(tk_) # (bs, max_seq_len_) snake_case__ = torch.tensor(UpperCamelCase__) # (bs) return tk_t, lg_t
654
0
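The Dual class above implements forward-mode automatic differentiation. A first-order sketch isolates the core idea, the product rule falling out of eps**2 == 0; `TinyDual` is an illustrative name, not part of the code above:

```python
# Sketch: first-order dual number a + b*eps with eps**2 == 0.
class TinyDual:
    def __init__(self, real: float, dual: float):
        self.real, self.dual = real, dual

    def __mul__(self, other: "TinyDual") -> "TinyDual":
        # (a + b*eps)(c + d*eps) = ac + (ad + bc)*eps, since eps**2 == 0.
        return TinyDual(self.real * other.real,
                        self.real * other.dual + self.dual * other.real)

x = TinyDual(3.0, 1.0)  # seed dx/dx = 1
y = x * x               # f(x) = x**2
print(y.real, y.dual)   # 9.0 6.0  ->  f(3) = 9, f'(3) = 6
```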
import mpmath # for roots of unity import numpy as np class lowerCamelCase_ : '''simple docstring''' def __init__( self , snake_case_=None , snake_case_=None ) -> List[str]: '''simple docstring''' __lowercase = list(poly_a or [0] )[:] __lowercase = list(poly_b or [0] )[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() __lowercase = len(self.polyA ) while self.polyB[-1] == 0: self.polyB.pop() __lowercase = len(self.polyB ) # Add 0 to make lengths equal a power of 2 __lowercase = int( 2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) ) while len(self.polyA ) < self.c_max_length: self.polyA.append(0 ) while len(self.polyB ) < self.c_max_length: self.polyB.append(0 ) # A complex root used for the fourier transform __lowercase = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) ) # The product __lowercase = self.__multiply() def A ( self , snake_case_ ) -> int: '''simple docstring''' __lowercase = [[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB] # Corner case if len(snake_case_ ) <= 1: return dft[0] # __lowercase = self.c_max_length // 2 while next_ncol > 0: __lowercase = [[] for i in range(snake_case_ )] __lowercase = self.root**next_ncol # First half of next step __lowercase = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(snake_case_ ): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] ) current_root *= root # Second half of next step __lowercase = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(snake_case_ ): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] ) current_root *= root # Update __lowercase = new_dft __lowercase = next_ncol // 2 return dft[0] def A ( self ) -> Union[str, Any]: '''simple docstring''' __lowercase = self.__dft('''A''' ) __lowercase = self.__dft('''B''' ) __lowercase = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]] del dft_a del dft_b # Corner Case if len(inverce_c[0] ) <= 1: return inverce_c[0] # Inverse DFT __lowercase = 2 while next_ncol <= self.c_max_length: __lowercase = [[] for i in range(snake_case_ )] __lowercase = self.root ** (next_ncol // 2) __lowercase = 1 # First half of next step for j in range(self.c_max_length // next_ncol ): for i in range(next_ncol // 2 ): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2 ) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root) ) current_root *= root # Update __lowercase = new_inverse_c next_ncol *= 2 # Unpack __lowercase = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__( self ) -> Dict: '''simple docstring''' __lowercase = '''A = ''' + ''' + '''.join( F'{coef}*x^{i}' for coef, i in enumerate(self.polyA[: self.len_A] ) ) __lowercase = '''B = ''' + ''' + '''.join( F'{coef}*x^{i}' for coef, i in enumerate(self.polyB[: self.len_B] ) ) __lowercase = '''A*B = ''' + ''' + '''.join( F'{coef}*x^{i}' for coef, i in enumerate(self.product ) ) return F'{a}\n{b}\n{c}' # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
527
# limitations under the License.

# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput  # noqa: F401
from .utils import deprecate

deprecate(
    "pipelines_utils",
    "0.22.0",
    "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
    standard_warn=False,
    stacklevel=3,
)
527
1
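The class above hand-rolls an iterative FFT for polynomial multiplication. The same product can be sanity-checked against numpy's built-in transforms in a few lines; the coefficients are illustrative:

```python
# Sketch: polynomial product via the FFT, same idea as the hand-rolled class.
import numpy as np

a = [1, 2]     # 1 + 2x
b = [3, 4, 5]  # 3 + 4x + 5x^2
n = len(a) + len(b) - 1  # degree of the product plus one
product = np.fft.ifft(np.fft.fft(a, n) * np.fft.fft(b, n)).real.round(8)
print(product)  # [ 3. 10. 13. 10.]  ->  3 + 10x + 13x^2 + 10x^3
```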
import inspect import os import unittest from pathlib import Path import torch import accelerate from accelerate.test_utils import execute_subprocess_async from accelerate.test_utils.testing import run_command class __lowercase ( unittest.TestCase ): lowercase = inspect.getfile(accelerate.test_utils ) lowercase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_cli.py'] ) lowercase = ['accelerate', 'launch'] lowercase = Path.home() / '.cache/huggingface/accelerate' lowercase = 'default_config.yaml' lowercase = config_folder / config_file lowercase = config_folder / '_default_config.yaml' lowercase = Path('tests/test_configs' ) @classmethod def __a ( cls : int ) -> str: '''simple docstring''' if cls.config_path.is_file(): cls.config_path.rename(cls.changed_path ) @classmethod def __a ( cls : Tuple ) -> Tuple: '''simple docstring''' if cls.changed_path.is_file(): cls.changed_path.rename(cls.config_path ) def __a ( self : Tuple ) -> Any: '''simple docstring''' lowercase = self.base_cmd if torch.cuda.is_available() and (torch.cuda.device_count() > 1): cmd += ["--multi_gpu"] execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() ) def __a ( self : Dict ) -> Any: '''simple docstring''' for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ): with self.subTest(config_file=UpperCamelCase_ ): execute_subprocess_async( self.base_cmd + ['''--config_file''', str(UpperCamelCase_ ), self.test_file_path] , env=os.environ.copy() ) def __a ( self : Dict ) -> int: '''simple docstring''' execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() ) class __lowercase ( unittest.TestCase ): lowercase = 'test-tpu' lowercase = 'us-central1-a' lowercase = 'ls' lowercase = ['accelerate', 'tpu-config'] lowercase = 'cd /usr/share' lowercase = 'tests/test_samples/test_command_file.sh' lowercase = 'Running gcloud compute tpus tpu-vm ssh' def __a ( self : Tuple ) -> Any: '''simple docstring''' lowercase = run_command( self.cmd + ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=UpperCamelCase_ , ) self.assertIn( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' , UpperCamelCase_ , ) def __a ( self : List[Any] ) -> Optional[int]: '''simple docstring''' lowercase = run_command( self.cmd + [ '''--config_file''', '''tests/test_configs/0_12_0.yaml''', '''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug''', ] , return_stdout=UpperCamelCase_ , ) self.assertIn( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' , UpperCamelCase_ , ) def __a ( self : Any ) -> List[str]: '''simple docstring''' lowercase = run_command( self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=UpperCamelCase_ ) self.assertIn( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all' , UpperCamelCase_ , ) def __a ( self : int ) -> Tuple: '''simple docstring''' lowercase = run_command( self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=UpperCamelCase_ , ) self.assertIn( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' , UpperCamelCase_ , ) def __a ( self : Optional[Any] ) -> List[str]: '''simple 
docstring''' lowercase = run_command( self.cmd + [ '''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--command''', '''echo \"Hello World\"''', '''--debug''', ] , return_stdout=UpperCamelCase_ , ) self.assertIn( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all' , UpperCamelCase_ , ) def __a ( self : Tuple ) -> Tuple: '''simple docstring''' lowercase = run_command( self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=UpperCamelCase_ , ) self.assertIn( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all' , UpperCamelCase_ , ) def __a ( self : int ) -> str: '''simple docstring''' lowercase = run_command( self.cmd + [ '''--config_file''', '''tests/test_configs/0_12_0.yaml''', '''--command_file''', self.command_file, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug''', ] , return_stdout=UpperCamelCase_ , ) self.assertIn( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all' , UpperCamelCase_ , ) def __a ( self : Union[str, Any] ) -> Optional[int]: '''simple docstring''' lowercase = run_command( self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=UpperCamelCase_ , ) self.assertIn( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all' , UpperCamelCase_ , ) def __a ( self : Optional[int] ) -> List[Any]: '''simple docstring''' lowercase = run_command( self.cmd + [ '''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--accelerate_version''', '''12.0.0''', '''--debug''', ] , return_stdout=UpperCamelCase_ , ) self.assertIn( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all' , UpperCamelCase_ , )
604
import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse("1.0.0a"): raise Exception("requires fairseq >= 1.0.0a") logging.set_verbosity_info() __a : Union[str, Any] = logging.get_logger(__name__) __a : Tuple = "Hello world! cécé herlolip" def _SCREAMING_SNAKE_CASE ( __lowercase : str , __lowercase : str , __lowercase : bool ) -> List[Any]: """simple docstring""" __A = FairseqRobertaModel.from_pretrained(__lowercase ) roberta.eval() # disable dropout __A = roberta.model.encoder.sentence_encoder __A = XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1E-5 , ) if classification_head: __A = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0] print("""Our RoBERTa config:""" , __lowercase ) __A = XLMRobertaXLForSequenceClassification(__lowercase ) if classification_head else XLMRobertaXLForMaskedLM(__lowercase ) model.eval() # Now let's copy all the weights. # Embeddings __A = roberta_sent_encoder.embed_tokens.weight __A = roberta_sent_encoder.embed_positions.weight __A = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. 
__A = roberta_sent_encoder.layer_norm.weight __A = roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer __A = model.roberta.encoder.layer[i] __A = roberta_sent_encoder.layers[i] __A = layer.attention __A = roberta_layer.self_attn_layer_norm.weight __A = roberta_layer.self_attn_layer_norm.bias # self attention __A = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) __A = roberta_layer.self_attn.q_proj.weight __A = roberta_layer.self_attn.q_proj.bias __A = roberta_layer.self_attn.k_proj.weight __A = roberta_layer.self_attn.k_proj.bias __A = roberta_layer.self_attn.v_proj.weight __A = roberta_layer.self_attn.v_proj.bias # self-attention output __A = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape __A = roberta_layer.self_attn.out_proj.weight __A = roberta_layer.self_attn.out_proj.bias # this one is final layer norm __A = roberta_layer.final_layer_norm.weight __A = roberta_layer.final_layer_norm.bias # intermediate __A = layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape __A = roberta_layer.fca.weight __A = roberta_layer.fca.bias # output __A = layer.output assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape __A = roberta_layer.fca.weight __A = roberta_layer.fca.bias # end of layer if classification_head: __A = roberta.model.classification_heads["""mnli"""].dense.weight __A = roberta.model.classification_heads["""mnli"""].dense.bias __A = roberta.model.classification_heads["""mnli"""].out_proj.weight __A = roberta.model.classification_heads["""mnli"""].out_proj.bias else: # LM Head __A = roberta.model.encoder.lm_head.dense.weight __A = roberta.model.encoder.lm_head.dense.bias __A = roberta.model.encoder.lm_head.layer_norm.weight __A = roberta.model.encoder.lm_head.layer_norm.bias __A = roberta.model.encoder.lm_head.weight __A = roberta.model.encoder.lm_head.bias # Let's check that we get the same results. __A = roberta.encode(__lowercase ).unsqueeze(0 ) # batch of size 1 __A = model(__lowercase )[0] if classification_head: __A = roberta.model.classification_heads["""mnli"""](roberta.extract_features(__lowercase ) ) else: __A = roberta.model(__lowercase )[0] print(our_output.shape , their_output.shape ) __A = torch.max(torch.abs(our_output - their_output ) ).item() print(f"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7 __A = torch.allclose(__lowercase , __lowercase , atol=1E-3 ) print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" ) if not success: raise Exception("""Something went wRoNg""" ) pathlib.Path(__lowercase ).mkdir(parents=__lowercase , exist_ok=__lowercase ) print(f"Saving model to {pytorch_dump_folder_path}" ) model.save_pretrained(__lowercase ) if __name__ == "__main__": __a : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--classification_head", action="store_true", help="Whether to convert a final classification head." 
) __a : Dict = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
637
0
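The test helpers above (`execute_subprocess_async`, `run_command`) boil down to launching a CLI with the current environment and asserting on the outcome. A minimal stand-in sketch, not the accelerate helpers themselves:

```python
# Sketch of the pattern the tests rely on: run a command, inherit the
# environment, capture stdout, and raise if the command fails.
import os
import subprocess

def run_command(cmd: list[str]) -> str:
    result = subprocess.run(
        cmd, env=os.environ.copy(), check=True, capture_output=True, text=True
    )
    return result.stdout

print(run_command(["python", "--version"]))
```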
import gc import inspect import unittest import torch from parameterized import parameterized from diffusers import PriorTransformer from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin enable_full_determinism() class snake_case__ ( lowerCAmelCase_ , unittest.TestCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = PriorTransformer _SCREAMING_SNAKE_CASE = """hidden_states""" @property def lowercase_ ( self : Tuple ) ->Any: snake_case__ : Dict = 4 snake_case__ : Optional[int] = 8 snake_case__ : int = 7 snake_case__ : Any = floats_tensor((batch_size, embedding_dim) ).to(_snake_case ) snake_case__ : str = floats_tensor((batch_size, embedding_dim) ).to(_snake_case ) snake_case__ : List[Any] = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(_snake_case ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def lowercase_ ( self : Any, _snake_case : Any=0 ) ->List[str]: torch.manual_seed(_snake_case ) snake_case__ : Tuple = 4 snake_case__ : int = 8 snake_case__ : Dict = 7 snake_case__ : Dict = torch.randn((batch_size, embedding_dim) ).to(_snake_case ) snake_case__ : Optional[int] = torch.randn((batch_size, embedding_dim) ).to(_snake_case ) snake_case__ : Dict = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_snake_case ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } @property def lowercase_ ( self : List[str] ) ->Dict: return (4, 8) @property def lowercase_ ( self : str ) ->int: return (4, 8) def lowercase_ ( self : Optional[Any] ) ->Optional[int]: snake_case__ : Optional[Any] = { 'num_attention_heads': 2, 'attention_head_dim': 4, 'num_layers': 2, 'embedding_dim': 8, 'num_embeddings': 7, 'additional_embeddings': 4, } snake_case__ : str = self.dummy_input return init_dict, inputs_dict def lowercase_ ( self : List[str] ) ->Union[str, Any]: snake_case__ , snake_case__ : int = PriorTransformer.from_pretrained( 'hf-internal-testing/prior-dummy', output_loading_info=_snake_case ) self.assertIsNotNone(_snake_case ) self.assertEqual(len(loading_info['missing_keys'] ), 0 ) model.to(_snake_case ) snake_case__ : Tuple = model(**self.dummy_input )[0] assert hidden_states is not None, "Make sure output is not None" def lowercase_ ( self : int ) ->Any: snake_case__ , snake_case__ : List[Any] = self.prepare_init_args_and_inputs_for_common() snake_case__ : Optional[int] = self.model_class(**_snake_case ) snake_case__ : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case__ : Union[str, Any] = [*signature.parameters.keys()] snake_case__ : Tuple = ['hidden_states', 'timestep'] self.assertListEqual(arg_names[:2], _snake_case ) def lowercase_ ( self : Optional[int] ) ->List[Any]: snake_case__ : List[Any] = PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy' ) snake_case__ : Tuple = model.to(_snake_case ) if hasattr(_snake_case, 'set_default_attn_processor' ): model.set_default_attn_processor() snake_case__ : List[str] = self.get_dummy_seed_input() with torch.no_grad(): snake_case__ : str = model(**_snake_case )[0] snake_case__ : Dict = output[0, :5].flatten().cpu() print(_snake_case ) # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # 
the expected output slices are not the same for CPU and GPU. snake_case__ : Optional[Any] = torch.tensor([-1.3_4_3_6, -0.2_8_7_0, 0.7_5_3_8, 0.4_3_6_8, -0.0_2_3_9] ) self.assertTrue(torch_all_close(_snake_case, _snake_case, rtol=1e-2 ) ) @slow class snake_case__ ( unittest.TestCase ): """simple docstring""" def lowercase_ ( self : Dict, _snake_case : Union[str, Any]=1, _snake_case : Tuple=7_6_8, _snake_case : List[Any]=7_7, _snake_case : Any=0 ) ->Any: torch.manual_seed(_snake_case ) snake_case__ : List[Any] = batch_size snake_case__ : int = embedding_dim snake_case__ : int = num_embeddings snake_case__ : List[Any] = torch.randn((batch_size, embedding_dim) ).to(_snake_case ) snake_case__ : int = torch.randn((batch_size, embedding_dim) ).to(_snake_case ) snake_case__ : List[Any] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_snake_case ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def lowercase_ ( self : int ) ->List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @parameterized.expand( [ # fmt: off [1_3, [-0.5_8_6_1, 0.1_2_8_3, -0.0_9_3_1, 0.0_8_8_2, 0.4_4_7_6, 0.1_3_2_9, -0.0_4_9_8, 0.0_6_4_0]], [3_7, [-0.4_9_1_3, 0.0_1_1_0, -0.0_4_8_3, 0.0_5_4_1, 0.4_9_5_4, -0.0_1_7_0, 0.0_3_5_4, 0.1_6_5_1]], # fmt: on ] ) def lowercase_ ( self : List[Any], _snake_case : List[Any], _snake_case : int ) ->Tuple: snake_case__ : str = PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior', subfolder='prior' ) model.to(_snake_case ) snake_case__ : Any = self.get_dummy_seed_input(seed=_snake_case ) with torch.no_grad(): snake_case__ : Optional[Any] = model(**_snake_case )[0] assert list(sample.shape ) == [1, 7_6_8] snake_case__ : List[Any] = sample[0, :8].flatten().cpu() print(_snake_case ) snake_case__ : int = torch.tensor(_snake_case ) assert torch_all_close(_snake_case, _snake_case, atol=1e-3 )
243
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a_ :List[Any] = logging.get_logger(__name__) a_ :Union[str, Any] = {"vocab_file": "spiece.model"} a_ :Optional[Any] = { "vocab_file": { "bert_for_seq_generation": ( "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model" ), } } a_ :str = {"bert_for_seq_generation": 512} class snake_case__ ( lowerCAmelCase_ ): """simple docstring""" _SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = ["""input_ids""", """attention_mask"""] def __init__( self : str, _snake_case : str, _snake_case : Optional[Any]="<s>", _snake_case : Tuple="</s>", _snake_case : int="<unk>", _snake_case : List[Any]="<pad>", _snake_case : Dict="<::::>", _snake_case : Optional[Dict[str, Any]] = None, **_snake_case : List[Any], ) ->None: snake_case__ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs # Add extra_ids to the special token list super().__init__( bos_token=_snake_case, eos_token=_snake_case, unk_token=_snake_case, pad_token=_snake_case, sep_token=_snake_case, sp_model_kwargs=self.sp_model_kwargs, **_snake_case, ) snake_case__ : Optional[int] = vocab_file snake_case__ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_snake_case ) @property def lowercase_ ( self : Any ) ->Any: return self.sp_model.get_piece_size() def lowercase_ ( self : List[str] ) ->Any: snake_case__ : Tuple = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Optional[int] ) ->str: snake_case__ : List[str] = self.__dict__.copy() snake_case__ : Any = None return state def __setstate__( self : str, _snake_case : Dict ) ->int: snake_case__ : Union[str, Any] = d # for backward compatibility if not hasattr(self, 'sp_model_kwargs' ): snake_case__ : Dict = {} snake_case__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowercase_ ( self : List[str], _snake_case : str ) ->List[str]: return self.sp_model.encode(_snake_case, out_type=_snake_case ) def lowercase_ ( self : Optional[int], _snake_case : str ) ->Union[str, Any]: return self.sp_model.piece_to_id(_snake_case ) def lowercase_ ( self : Union[str, Any], _snake_case : Union[str, Any] ) ->int: snake_case__ : List[str] = self.sp_model.IdToPiece(_snake_case ) return token def lowercase_ ( self : List[str], _snake_case : Optional[Any] ) ->Any: snake_case__ : int = [] snake_case__ : Any = '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(_snake_case ) + token snake_case__ : str = [] else: current_sub_tokens.append(_snake_case ) out_string += self.sp_model.decode(_snake_case ) return out_string.strip() def lowercase_ ( self : int, _snake_case : str, _snake_case : Optional[str] = None ) ->Tuple[str]: if not os.path.isdir(_snake_case ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case__ : List[str] = os.path.join( _snake_case, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if 
os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file, _snake_case ) elif not os.path.isfile(self.vocab_file ): with open(_snake_case, 'wb' ) as fi: snake_case__ : Tuple = self.sp_model.serialized_model_proto() fi.write(_snake_case ) return (out_vocab_file,)
243
1
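The tokenizer above is a thin wrapper around a SentencePiece model. A minimal round-trip sketch of that dependency; the model path is illustrative and must point at a real trained `.model` file:

```python
# Sketch: encode/decode round trip with sentencepiece.
import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.Load("spiece.model")  # illustrative path to a trained model
pieces = sp.encode("Hello world", out_type=str)
print(pieces)
print(sp.decode(pieces))
```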
import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase=1 ) -> List[Any]: """simple docstring""" if n_shave_prefix_segments >= 0: return ".".join(path.split('''.''' )[n_shave_prefix_segments:] ) else: return ".".join(path.split('''.''' )[:n_shave_prefix_segments] ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase=0 ) -> Tuple: """simple docstring""" snake_case_ : Tuple = [] for old_item in old_list: snake_case_ : str = old_item.replace('''in_layers.0''' , '''norm1''' ) snake_case_ : Dict = new_item.replace('''in_layers.2''' , '''conv1''' ) snake_case_ : Union[str, Any] = new_item.replace('''out_layers.0''' , '''norm2''' ) snake_case_ : str = new_item.replace('''out_layers.3''' , '''conv2''' ) snake_case_ : List[str] = new_item.replace('''emb_layers.1''' , '''time_emb_proj''' ) snake_case_ : List[Any] = new_item.replace('''skip_connection''' , '''conv_shortcut''' ) snake_case_ : Optional[Any] = shave_segments(_UpperCamelCase , n_shave_prefix_segments=_UpperCamelCase ) mapping.append({'''old''': old_item, '''new''': new_item} ) return mapping def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase=0 ) -> Dict: """simple docstring""" snake_case_ : List[Any] = [] for old_item in old_list: snake_case_ : Tuple = old_item snake_case_ : List[Any] = new_item.replace('''norm.weight''' , '''group_norm.weight''' ) snake_case_ : List[Any] = new_item.replace('''norm.bias''' , '''group_norm.bias''' ) snake_case_ : Dict = new_item.replace('''proj_out.weight''' , '''proj_attn.weight''' ) snake_case_ : Union[str, Any] = new_item.replace('''proj_out.bias''' , '''proj_attn.bias''' ) snake_case_ : List[Any] = shave_segments(_UpperCamelCase , n_shave_prefix_segments=_UpperCamelCase ) mapping.append({'''old''': old_item, '''new''': new_item} ) return mapping def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None ) -> Any: """simple docstring""" assert isinstance(_UpperCamelCase , _UpperCamelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): snake_case_ : List[Any] = old_checkpoint[path] snake_case_ : Tuple = old_tensor.shape[0] // 3 snake_case_ : int = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) snake_case_ : Dict = old_tensor.shape[0] // config['''num_head_channels'''] // 3 snake_case_ : Optional[Any] = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) snake_case_ , snake_case_ , snake_case_ : Any = old_tensor.split(channels // num_heads , dim=1 ) snake_case_ : List[str] = query.reshape(_UpperCamelCase ) snake_case_ : Dict = key.reshape(_UpperCamelCase ) snake_case_ : List[str] = value.reshape(_UpperCamelCase ) for path in paths: snake_case_ : Any = path['''new'''] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here snake_case_ : List[Any] = new_path.replace('''middle_block.0''' , '''mid_block.resnets.0''' ) snake_case_ : Dict = new_path.replace('''middle_block.1''' , '''mid_block.attentions.0''' ) snake_case_ : str = new_path.replace('''middle_block.2''' , '''mid_block.resnets.1''' ) if additional_replacements is not None: for replacement in additional_replacements: snake_case_ : str = new_path.replace(replacement['''old'''] , replacement['''new'''] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: snake_case_ : Dict = old_checkpoint[path['''old''']][:, :, 0] else: snake_case_ : int = old_checkpoint[path['''old''']] def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> List[Any]: """simple docstring""" snake_case_ : Optional[int] = {} snake_case_ : Tuple = checkpoint['''time_embed.0.weight'''] snake_case_ : Any = checkpoint['''time_embed.0.bias'''] snake_case_ : Optional[Any] = checkpoint['''time_embed.2.weight'''] snake_case_ : List[Any] = checkpoint['''time_embed.2.bias'''] snake_case_ : List[str] = checkpoint['''input_blocks.0.0.weight'''] snake_case_ : Optional[int] = checkpoint['''input_blocks.0.0.bias'''] snake_case_ : Any = checkpoint['''out.0.weight'''] snake_case_ : Union[str, Any] = checkpoint['''out.0.bias'''] snake_case_ : Optional[Any] = checkpoint['''out.2.weight'''] snake_case_ : Union[str, Any] = checkpoint['''out.2.bias'''] # Retrieves the keys for the input blocks only snake_case_ : Union[str, Any] = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} ) snake_case_ : Optional[Any] = { layer_id: [key for key in checkpoint if f'''input_blocks.{layer_id}''' in key] for layer_id in range(_UpperCamelCase ) } # Retrieves the keys for the middle blocks only snake_case_ : List[Any] = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} ) snake_case_ : List[Any] = { layer_id: [key for key in checkpoint if f'''middle_block.{layer_id}''' in key] for layer_id in range(_UpperCamelCase ) } # Retrieves the keys for the output blocks only snake_case_ : Any = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} ) snake_case_ : int = { layer_id: [key for key in checkpoint if f'''output_blocks.{layer_id}''' in key] for layer_id in range(_UpperCamelCase ) } for i in range(1 , _UpperCamelCase ): snake_case_ : Optional[Any] = (i - 1) // (config['''num_res_blocks'''] + 1) snake_case_ : List[Any] = (i - 1) % (config['''num_res_blocks'''] + 1) snake_case_ : Dict = [key for key in 
input_blocks[i] if f'''input_blocks.{i}.0''' in key] snake_case_ : int = [key for key in input_blocks[i] if f'''input_blocks.{i}.1''' in key] if f'''input_blocks.{i}.0.op.weight''' in checkpoint: snake_case_ : Optional[int] = checkpoint[ f'''input_blocks.{i}.0.op.weight''' ] snake_case_ : Union[str, Any] = checkpoint[ f'''input_blocks.{i}.0.op.bias''' ] continue snake_case_ : Optional[Any] = renew_resnet_paths(_UpperCamelCase ) snake_case_ : Union[str, Any] = {'''old''': f'''input_blocks.{i}.0''', '''new''': f'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''} snake_case_ : Dict = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''} assign_to_checkpoint( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , additional_replacements=[meta_path, resnet_op] , config=_UpperCamelCase ) if len(_UpperCamelCase ): snake_case_ : Union[str, Any] = renew_attention_paths(_UpperCamelCase ) snake_case_ : int = { '''old''': f'''input_blocks.{i}.1''', '''new''': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}''', } snake_case_ : Optional[Any] = { f'''input_blocks.{i}.1.qkv.bias''': { '''key''': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', '''query''': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', '''value''': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, f'''input_blocks.{i}.1.qkv.weight''': { '''key''': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', '''query''': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', '''value''': f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=_UpperCamelCase , config=_UpperCamelCase , ) snake_case_ : int = middle_blocks[0] snake_case_ : List[str] = middle_blocks[1] snake_case_ : Optional[int] = middle_blocks[2] snake_case_ : List[str] = renew_resnet_paths(_UpperCamelCase ) assign_to_checkpoint(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , config=_UpperCamelCase ) snake_case_ : Tuple = renew_resnet_paths(_UpperCamelCase ) assign_to_checkpoint(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , config=_UpperCamelCase ) snake_case_ : Optional[Any] = renew_attention_paths(_UpperCamelCase ) snake_case_ : Optional[Any] = { '''middle_block.1.qkv.bias''': { '''key''': '''mid_block.attentions.0.key.bias''', '''query''': '''mid_block.attentions.0.query.bias''', '''value''': '''mid_block.attentions.0.value.bias''', }, '''middle_block.1.qkv.weight''': { '''key''': '''mid_block.attentions.0.key.weight''', '''query''': '''mid_block.attentions.0.query.weight''', '''value''': '''mid_block.attentions.0.value.weight''', }, } assign_to_checkpoint( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , attention_paths_to_split=_UpperCamelCase , config=_UpperCamelCase ) for i in range(_UpperCamelCase ): snake_case_ : Tuple = i // (config['''num_res_blocks'''] + 1) snake_case_ : List[str] = i % (config['''num_res_blocks'''] + 1) snake_case_ : str = [shave_segments(_UpperCamelCase , 2 ) for name in output_blocks[i]] snake_case_ : Optional[Any] = {} for layer in output_block_layers: snake_case_ , snake_case_ : Any = layer.split('''.''' )[0], shave_segments(_UpperCamelCase , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(_UpperCamelCase ) else: snake_case_ : int = [layer_name] if len(_UpperCamelCase ) > 1: snake_case_ : Tuple = [key for 
key in output_blocks[i] if f'''output_blocks.{i}.0''' in key] snake_case_ : Any = [key for key in output_blocks[i] if f'''output_blocks.{i}.1''' in key] snake_case_ : List[Any] = renew_resnet_paths(_UpperCamelCase ) snake_case_ : Dict = renew_resnet_paths(_UpperCamelCase ) snake_case_ : Tuple = {'''old''': f'''output_blocks.{i}.0''', '''new''': f'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''} assign_to_checkpoint(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , additional_replacements=[meta_path] , config=_UpperCamelCase ) if ["conv.weight", "conv.bias"] in output_block_list.values(): snake_case_ : Optional[Any] = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] ) snake_case_ : Optional[Any] = checkpoint[ f'''output_blocks.{i}.{index}.conv.weight''' ] snake_case_ : Any = checkpoint[ f'''output_blocks.{i}.{index}.conv.bias''' ] # Clear attentions as they have been attributed above. if len(_UpperCamelCase ) == 2: snake_case_ : str = [] if len(_UpperCamelCase ): snake_case_ : Tuple = renew_attention_paths(_UpperCamelCase ) snake_case_ : str = { '''old''': f'''output_blocks.{i}.1''', '''new''': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}''', } snake_case_ : List[str] = { f'''output_blocks.{i}.1.qkv.bias''': { '''key''': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', '''query''': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', '''value''': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, f'''output_blocks.{i}.1.qkv.weight''': { '''key''': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', '''query''': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', '''value''': f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None , config=_UpperCamelCase , ) else: snake_case_ : Union[str, Any] = renew_resnet_paths(_UpperCamelCase , n_shave_prefix_segments=1 ) for path in resnet_0_paths: snake_case_ : Dict = '''.'''.join(['''output_blocks''', str(_UpperCamelCase ), path['''old''']] ) snake_case_ : Optional[Any] = '''.'''.join(['''up_blocks''', str(_UpperCamelCase ), '''resnets''', str(_UpperCamelCase ), path['''new''']] ) snake_case_ : List[Any] = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the architecture.''', ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') lowerCAmelCase_ = parser.parse_args() lowerCAmelCase_ = torch.load(args.checkpoint_path) with open(args.config_file) as f: lowerCAmelCase_ = json.loads(f.read()) lowerCAmelCase_ = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] lowerCAmelCase_ = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: lowerCAmelCase_ = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) lowerCAmelCase_ = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) lowerCAmelCase_ = 
LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
60
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available

_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
257
0
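The conversion script above is, at its core, a key-renaming pass over an old state dict. A stripped-down sketch of that idea, with illustrative keys rather than real checkpoint entries:

```python
# Sketch: map old state-dict keys to new ones with chained string replacements,
# the same mechanism as renew_resnet_paths / assign_to_checkpoint above.
old_state = {"in_layers.0.weight": 1, "out_layers.3.bias": 2}
renames = {"in_layers.0": "norm1", "out_layers.3": "conv2"}

new_state = {}
for key, value in old_state.items():
    for old, new in renames.items():
        key = key.replace(old, new)
    new_state[key] = value

print(new_state)  # {'norm1.weight': 1, 'conv2.bias': 2}
```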
import sys


def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print("No. of operations required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
717
def solution(n: int = 4_000_000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
276
0
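The matrix-chain code above fills the cost table bottom-up. The same recurrence can be written top-down with memoization; a sketch with an illustrative function name, checked against the classic dimension sequence used above:

```python
# Sketch: top-down memoized matrix chain ordering.
from functools import lru_cache

def matrix_chain_min_cost(dims: tuple[int, ...]) -> int:
    @lru_cache(maxsize=None)
    def best(i: int, j: int) -> int:
        if i == j:
            return 0  # a single matrix needs no multiplication
        return min(
            best(i, k) + best(k + 1, j) + dims[i - 1] * dims[k] * dims[j]
            for k in range(i, j)
        )
    return best(1, len(dims) - 1)

print(matrix_chain_min_cost((30, 35, 15, 5, 10, 20, 25)))  # 15125
```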
"""simple docstring""" import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class SCREAMING_SNAKE_CASE : """simple docstring""" @staticmethod def _lowerCAmelCase ( *_snake_case : Optional[Any] , **_snake_case : Tuple ) -> Any: '''simple docstring''' pass @is_pipeline_test @require_torch @require_vision class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" a_ : List[str] =MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def _lowerCAmelCase ( self : List[str] , _snake_case : Tuple , _snake_case : Optional[int] , _snake_case : Dict ) -> Optional[int]: '''simple docstring''' a__ = pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' ) a__ = [ { 'image': Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ), 'question': 'How many cats are there?', }, { 'image': './tests/fixtures/tests_samples/COCO/000000039769.png', 'question': 'How many cats are there?', }, ] return vqa_pipeline, examples def _lowerCAmelCase ( self : List[Any] , _snake_case : Optional[Any] , _snake_case : int ) -> Optional[int]: '''simple docstring''' a__ = vqa_pipeline(_snake_case , top_k=1 ) self.assertEqual( _snake_case , [ [{'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}], [{'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}], ] , ) @require_torch def _lowerCAmelCase ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' a__ = pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' ) a__ = './tests/fixtures/tests_samples/COCO/000000039769.png' a__ = 'How many cats are there?' a__ = vqa_pipeline(image=_snake_case , question='How many cats are there?' , top_k=2 ) self.assertEqual( _snake_case , [{'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}, {'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}] ) a__ = vqa_pipeline({'image': image, 'question': question} , top_k=2 ) self.assertEqual( _snake_case , [{'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}, {'score': ANY(_snake_case ), 'answer': ANY(_snake_case )}] ) @slow @require_torch def _lowerCAmelCase ( self : Dict ) -> str: '''simple docstring''' a__ = pipeline('visual-question-answering' , model='dandelin/vilt-b32-finetuned-vqa' ) a__ = './tests/fixtures/tests_samples/COCO/000000039769.png' a__ = 'How many cats are there?' a__ = vqa_pipeline(image=_snake_case , question=_snake_case , top_k=2 ) self.assertEqual( nested_simplify(_snake_case , decimals=4 ) , [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}] ) a__ = vqa_pipeline({'image': image, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(_snake_case , decimals=4 ) , [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}] ) a__ = vqa_pipeline( [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 ) self.assertEqual( nested_simplify(_snake_case , decimals=4 ) , [[{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}]] * 2 , ) @require_tf @unittest.skip('Visual question answering not implemented in TF' ) def _lowerCAmelCase ( self : Dict ) -> Optional[Any]: '''simple docstring''' pass
232
"""simple docstring""" import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__=None ) -> int: '''simple docstring''' assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match''' a__ = nn.Parameter(UpperCAmelCase__ ) if bias is not None: assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match''' a__ = nn.Parameter(UpperCAmelCase__ ) def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__ ) -> Dict: '''simple docstring''' a__ = np.asarray(weights[0] ) a__ = np.asarray(weights[1] ) a__ = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key,torch.tensor(UpperCAmelCase__ ).transpose(1,2 ).contiguous().view(-1,UpperCAmelCase__ ),) set_param( torch_layer.self_attention.value,torch.tensor(UpperCAmelCase__ ).transpose(1,2 ).contiguous().view(-1,UpperCAmelCase__ ),) set_param( torch_layer.output.dense,torch.tensor(UpperCAmelCase__ ).view(-1,UpperCAmelCase__ ).contiguous().transpose(0,1 ),) def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__ ) -> Dict: '''simple docstring''' a__ = np.asarray(weights[0] ) a__ = np.asarray(weights[1] ) a__ = np.asarray(weights[2] ) a__ = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query,torch.tensor(UpperCAmelCase__ ).transpose(1,2 ).contiguous().view(-1,UpperCAmelCase__ ),) set_param( torch_layer.self_attention.key,torch.tensor(UpperCAmelCase__ ).transpose(1,2 ).contiguous().view(-1,UpperCAmelCase__ ),) set_param( torch_layer.self_attention.value,torch.tensor(UpperCAmelCase__ ).transpose(1,2 ).contiguous().view(-1,UpperCAmelCase__ ),) set_param( torch_layer.output.dense,torch.tensor(UpperCAmelCase__ ).view(-1,UpperCAmelCase__ ).contiguous().transpose(0,1 ),) def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__ ) -> Optional[Any]: '''simple docstring''' a__ = weights[0][0][0] a__ = np.asarray(layer_norm_a[0] ) a__ = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm,torch.tensor(UpperCAmelCase__ ),torch.tensor(UpperCAmelCase__ ),) # lsh weights + output a__ = weights[0][1] if len(UpperCAmelCase__ ) < 4: set_layer_weights_in_torch_lsh(UpperCAmelCase__,torch_block.attention,UpperCAmelCase__ ) else: set_layer_weights_in_torch_local(UpperCAmelCase__,torch_block.attention,UpperCAmelCase__ ) # intermediate weighs a__ = weights[2][0][1][2] # Chunked Feed Forward if len(UpperCAmelCase__ ) == 4: a__ = intermediate_weights[2] # layernorm 2 a__ = np.asarray(intermediate_weights[0][0] ) a__ = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm,torch.tensor(UpperCAmelCase__ ),torch.tensor(UpperCAmelCase__ ),) # intermediate dense a__ = np.asarray(intermediate_weights[1][0] ) a__ = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense,torch.tensor(UpperCAmelCase__ ).transpose(0,1 ).contiguous(),torch.tensor(UpperCAmelCase__ ),) # intermediate out a__ = np.asarray(intermediate_weights[4][0] ) a__ = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense,torch.tensor(UpperCAmelCase__ ).transpose(0,1 ).contiguous(),torch.tensor(UpperCAmelCase__ ),) def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__ ) -> List[Any]: '''simple docstring''' a__ = 
torch_model.reformer # word embeds a__ = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings,torch.tensor(UpperCAmelCase__ ),) if isinstance(weights[3],UpperCAmelCase__ ): a__ = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): a__ = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), f'''{position_embeddings[emb_idx]} emb does not match''' a__ = nn.Parameter(torch.tensor(UpperCAmelCase__ ) ) a__ = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( UpperCAmelCase__ ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): a__ = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__ ) # output layer norm a__ = np.asarray(weights[7][0] ) a__ = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm,torch.tensor(UpperCAmelCase__ ),torch.tensor(UpperCAmelCase__ ),) # output embeddings a__ = np.asarray(weights[9][0] ) a__ = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder,torch.tensor(UpperCAmelCase__ ).transpose(0,1 ).contiguous(),torch.tensor(UpperCAmelCase__ ),) def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__ ) -> Optional[int]: '''simple docstring''' a__ = ReformerConfig.from_json_file(UpperCAmelCase__ ) print(f'''Building PyTorch model from configuration: {config}''' ) a__ = ReformerModelWithLMHead(UpperCAmelCase__ ) with open(UpperCAmelCase__,'rb' ) as f: a__ = pickle.load(UpperCAmelCase__ )['weights'] set_model_weights_in_torch(UpperCAmelCase__,UpperCAmelCase__,config.hidden_size ) # Save pytorch-model print(f'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict(),UpperCAmelCase__ ) if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained Reformer model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) __magic_name__ = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
232
1
from __future__ import annotations

from typing import TypedDict


class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    """Returns the list of all rotations of the string s."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    """Returns the Burrows-Wheeler transform of s and the index of the original string."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Reverses the Burrows-Wheeler transform, recovering the original string."""
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError("The parameter idx_original_string type must be int or castable to int.")
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError("The parameter idx_original_string must be lower than len(bwt_string).")

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(f"Burrows Wheeler transform for string '{s}' results in '{result['bwt_string']}'")
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
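# Editor's note: a minimal round-trip sketch for the Burrows-Wheeler functions above,
# assuming bwt_transform and reverse_bwt are importable from the same module. The
# sample input "^BANANA" is illustrative and not part of the original file.
result = bwt_transform("^BANANA")
assert result["bwt_string"] == "BNN^AAA"
assert reverse_bwt(result["bwt_string"], result["idx_original_string"]) == "^BANANA"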
705
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class a__ ( unittest.TestCase ): def __init__( self , _A , _A=7 , _A=3 , _A=1_8 , _A=3_0 , _A=4_0_0 , _A=True , _A=None , _A=True , ): """simple docstring""" __lowerCAmelCase = size if size is not None else {"height": 1_8, "width": 1_8} __lowerCAmelCase = parent __lowerCAmelCase = batch_size __lowerCAmelCase = num_channels __lowerCAmelCase = image_size __lowerCAmelCase = min_resolution __lowerCAmelCase = max_resolution __lowerCAmelCase = do_resize __lowerCAmelCase = size __lowerCAmelCase = apply_ocr def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class a__ ( snake_case__ , unittest.TestCase ): _a : Tuple = LayoutLMvaImageProcessor if is_pytesseract_available() else None def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = LayoutLMvaImageProcessingTester(self ) @property def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , "do_resize" ) ) self.assertTrue(hasattr(_A , "size" ) ) self.assertTrue(hasattr(_A , "apply_ocr" ) ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"height": 1_8, "width": 1_8} ) __lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 ) self.assertEqual(image_processor.size , {"height": 4_2, "width": 4_2} ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" pass def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="pt" ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) self.assertIsInstance(encoding.words , _A ) self.assertIsInstance(encoding.boxes , _A ) # Test batched __lowerCAmelCase = image_processing(_A , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test 
not batched input __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched __lowerCAmelCase = image_processing(_A , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched __lowerCAmelCase = image_processing(_A , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = LayoutLMvaImageProcessor() from datasets import load_dataset __lowerCAmelCase = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" ) __lowerCAmelCase = Image.open(ds[0]["file"] ).convert("RGB" ) __lowerCAmelCase = image_processing(_A , return_tensors="pt" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 __lowerCAmelCase = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", 
"Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231 __lowerCAmelCase = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], 
[7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , _A ) self.assertListEqual(encoding.boxes , _A ) # with apply_OCR = False __lowerCAmelCase = LayoutLMvaImageProcessor(apply_ocr=_A ) __lowerCAmelCase = image_processing(_A , return_tensors="pt" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
552
0
'''simple docstring''' from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class lowerCAmelCase__ : def __init__( self : List[str] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[int]=13 , UpperCamelCase_ : Optional[Any]=30 , UpperCamelCase_ : Union[str, Any]=2 , UpperCamelCase_ : Dict=3 , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : List[str]=32 , UpperCamelCase_ : Any=2 , UpperCamelCase_ : Tuple=4 , UpperCamelCase_ : Dict=37 , UpperCamelCase_ : Optional[int]="gelu" , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : Optional[int]=0.1 , UpperCamelCase_ : str=10 , UpperCamelCase_ : List[str]=0.02 , UpperCamelCase_ : List[Any]=3 , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Tuple=2 , ) -> Any: """simple docstring""" lowerCamelCase_ : List[str] = parent lowerCamelCase_ : Tuple = batch_size lowerCamelCase_ : int = image_size lowerCamelCase_ : Any = patch_size lowerCamelCase_ : Union[str, Any] = num_channels lowerCamelCase_ : Optional[Any] = is_training lowerCamelCase_ : List[Any] = use_labels lowerCamelCase_ : List[str] = hidden_size lowerCamelCase_ : Optional[int] = num_hidden_layers lowerCamelCase_ : List[Any] = num_attention_heads lowerCamelCase_ : List[Any] = intermediate_size lowerCamelCase_ : Union[str, Any] = hidden_act lowerCamelCase_ : Dict = hidden_dropout_prob lowerCamelCase_ : List[Any] = attention_probs_dropout_prob lowerCamelCase_ : int = type_sequence_label_size lowerCamelCase_ : Tuple = initializer_range lowerCamelCase_ : Dict = scope lowerCamelCase_ : Any = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) lowerCamelCase_ : Tuple = (image_size // patch_size) ** 2 lowerCamelCase_ : Tuple = num_patches + 2 def __UpperCamelCase ( self : str ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase_ : Dict = None if self.use_labels: lowerCamelCase_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ : Optional[Any] = self.get_config() return config, pixel_values, labels def __UpperCamelCase ( self : str ) -> int: """simple docstring""" return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , 
encoder_stride=self.encoder_stride , ) def __UpperCamelCase ( self : str , UpperCamelCase_ : str , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ : List[Any] = TFDeiTModel(config=UpperCamelCase__ ) lowerCamelCase_ : Optional[Any] = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCamelCase ( self : Dict , UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ : List[str] = TFDeiTForMaskedImageModeling(config=UpperCamelCase__ ) lowerCamelCase_ : int = model(UpperCamelCase__ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCamelCase_ : Union[str, Any] = 1 lowerCamelCase_ : Optional[Any] = TFDeiTForMaskedImageModeling(UpperCamelCase__ ) lowerCamelCase_ : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase_ : Optional[Any] = model(UpperCamelCase__ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __UpperCamelCase ( self : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ : Dict = self.type_sequence_label_size lowerCamelCase_ : str = TFDeiTForImageClassification(UpperCamelCase__ ) lowerCamelCase_ : Dict = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCamelCase_ : str = 1 lowerCamelCase_ : Tuple = TFDeiTForImageClassification(UpperCamelCase__ ) lowerCamelCase_ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase_ : Optional[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __UpperCamelCase ( self : int ) -> Optional[int]: """simple docstring""" lowerCamelCase_ : str = self.prepare_config_and_inputs() lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Union[str, Any] = config_and_inputs lowerCamelCase_ : Tuple = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class lowerCAmelCase__ ( __lowercase ,__lowercase ,unittest.TestCase ): A = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) A = ( { '''feature-extraction''': TFDeiTModel, '''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) A = False A = False A = False A = False def __UpperCamelCase ( self : int ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ : List[str] = TFDeiTModelTester(self ) lowerCamelCase_ : Any = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 ) def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''DeiT does not use inputs_embeds''' ) def __UpperCamelCase ( self : str ) -> Dict: """simple docstring""" pass def __UpperCamelCase ( self : int ) -> Any: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ : 
Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ : Union[str, Any] = model_class(UpperCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) lowerCamelCase_ : Optional[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase__ , tf.keras.layers.Dense ) ) def __UpperCamelCase ( self : int ) -> List[str]: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ : List[str] = model_class(UpperCamelCase__ ) lowerCamelCase_ : List[str] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase_ : Optional[int] = [*signature.parameters.keys()] lowerCamelCase_ : Optional[int] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase__ ) def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ ) def __UpperCamelCase ( self : int , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any]=False ) -> Any: """simple docstring""" lowerCamelCase_ : Any = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def __UpperCamelCase ( self : Dict ) -> Any: """simple docstring""" for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ : Union[str, Any] = TFDeiTModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def __snake_case (): """simple docstring""" lowerCamelCase_ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class lowerCAmelCase__ ( unittest.TestCase ): @cached_property def __UpperCamelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" return ( DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ) if is_vision_available() else None ) @slow def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" lowerCamelCase_ : int = TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ) lowerCamelCase_ : Optional[Any] = self.default_image_processor lowerCamelCase_ : Tuple = prepare_img() lowerCamelCase_ : Optional[int] = image_processor(images=UpperCamelCase__ , return_tensors='''tf''' ) # forward pass lowerCamelCase_ : Dict = model(**UpperCamelCase__ ) # verify the logits lowerCamelCase_ : str = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) lowerCamelCase_ : str = tf.constant([-1.0266, 0.1912, -1.2861] ) 
self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
501
import numpy as np
import torch

from imwatermark import WatermarkEncoder


# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()
        images = [self.encoder.encode(image, "dwtDct") for image in images]
        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
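# Editor's note: a hedged usage sketch for the watermarker above, assuming the
# invisible-watermark package (imwatermark) is installed. The random 1x3x512x512
# tensor is a stand-in for a decoded image batch in [-1, 1], not real pipeline output;
# the watermark is only applied when the image side length is at least 256.
watermarker = StableDiffusionXLWatermarker()
fake_images = torch.rand(1, 3, 512, 512) * 2 - 1  # batch in [-1, 1]
watermarked = watermarker.apply_watermark(fake_images)
assert watermarked.shape == fake_images.shape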
699
0
import re


def dna_complement(dna: str) -> str:
    """
    Returns the complementary strand of a DNA sequence.

    >>> dna_complement("GCTA")
    'CGAT'
    >>> dna_complement("ATGC")
    'TACG'
    """
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
127
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version of one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all the example files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace links to the main docs with links to the stable docs in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version() -> packaging.version.Version:
    """Read the current version from the package __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
127
1
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
320
"""simple docstring""" import socket def a ( ): '''simple docstring''' UpperCAmelCase_ :Union[str, Any] = socket.socket(socket.AF_INET, socket.SOCK_STREAM ) UpperCAmelCase_ :int = socket.gethostname() UpperCAmelCase_ :List[Any] = 12312 sock.connect((host, port) ) sock.send(b'''Hello server!''' ) with open('''Received_file''', '''wb''' ) as out_file: print('''File opened''' ) print('''Receiving data...''' ) while True: UpperCAmelCase_ :int = sock.recv(1024 ) if not data: break out_file.write(__snake_case ) print('''Successfully received the file''' ) sock.close() print('''Connection closed''' ) if __name__ == "__main__": main()
608
0
import argparse
import struct
import unittest


class SHA256:
    """Contains the full pipeline for the SHA-256 hashing algorithm."""

    def __init__(self, data: bytes) -> None:
        self.data = data

        # Initialize hash values
        self.hashes = [
            0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
            0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19,
        ]

        # Initialize round constants
        self.round_constants = [
            0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
            0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
            0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
            0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
            0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
            0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
            0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
            0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,
        ]

        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64] for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (words[index - 16] + s0 + words[index - 7] + s1) % 0x100000000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (h + s1 + ch + self.round_constants[index] + words[index]) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    (d + temp1) % 0x100000000,
                    c,
                    b,
                    a,
                    (temp1 + temp2) % 0x100000000,
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)


class SHA256HashTest(unittest.TestCase):
    """Test class for the SHA256 class."""

    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main():
    """Hash either a string (-s) or the contents of a file (-f) and print the digest."""
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()

    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
62
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Prepare the plaintext by upper-casing it and separating repeated letters with X's."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J are merged to allow a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
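# Editor's note: a small round-trip sketch for the Playfair functions above, assuming
# encode, decode, and prepare_input are defined in the same module. Key and message
# are illustrative; note the message must not contain J, which this table omits.
key = "playfair example"
message = "Hide the gold in the tree stump"
ciphertext = encode(message, key)
# decoding recovers the *prepared* plaintext (upper-cased, X-separated doubles)
assert decode(ciphertext, key) == prepare_input(message)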
62
1
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer UpperCAmelCase_ : Any = logging.get_logger(__name__) UpperCAmelCase_ : Optional[Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} UpperCAmelCase_ : str = { '''vocab_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''', '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json''' ), }, '''merges_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''', '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt''' ), }, '''tokenizer_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''', '''roberta-base-openai-detector''': ( '''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json''' ), '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json''' ), }, } UpperCAmelCase_ : List[Any] = { '''roberta-base''': 512, '''roberta-large''': 512, '''roberta-large-mnli''': 512, '''distilroberta-base''': 512, '''roberta-base-openai-detector''': 512, '''roberta-large-openai-detector''': 512, } class UpperCamelCase ( _UpperCAmelCase ): lowerCAmelCase : Optional[int] = VOCAB_FILES_NAMES lowerCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase : Dict = ["input_ids", "attention_mask"] lowerCAmelCase : Dict = RobertaTokenizer def __init__( self , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__="replace" , UpperCAmelCase__="<s>" , UpperCAmelCase__="</s>" , UpperCAmelCase__="</s>" , UpperCAmelCase__="<s>" , UpperCAmelCase__="<unk>" , UpperCAmelCase__="<pad>" , UpperCAmelCase__="<mask>" , UpperCAmelCase__=False , UpperCAmelCase__=True , **UpperCAmelCase__ , ): super().__init__( UpperCAmelCase_ , UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , errors=UpperCAmelCase_ , 
bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , trim_offsets=UpperCAmelCase_ , **UpperCAmelCase_ , ) A__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , UpperCAmelCase_ ) != add_prefix_space: A__ = getattr(UpperCAmelCase_ , pre_tok_state.pop("type" ) ) A__ = add_prefix_space A__ = pre_tok_class(**UpperCAmelCase_ ) A__ = add_prefix_space A__ = 'post_processor' A__ = getattr(self.backend_tokenizer , UpperCAmelCase_ , UpperCAmelCase_ ) if tokenizer_component_instance: A__ = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: A__ = tuple(state["sep"] ) if "cls" in state: A__ = tuple(state["cls"] ) A__ = False if state.get("add_prefix_space" , UpperCAmelCase_ ) != add_prefix_space: A__ = add_prefix_space A__ = True if state.get("trim_offsets" , UpperCAmelCase_ ) != trim_offsets: A__ = trim_offsets A__ = True if changes_to_apply: A__ = getattr(UpperCAmelCase_ , state.pop("type" ) ) A__ = component_class(**UpperCAmelCase_ ) setattr(self.backend_tokenizer , UpperCAmelCase_ , UpperCAmelCase_ ) @property def __A ( self ): if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def __A ( self , UpperCAmelCase__ ): A__ = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else value A__ = value def __A ( self , *UpperCAmelCase__ , **UpperCAmelCase__ ): A__ = kwargs.get("is_split_into_words" , UpperCAmelCase_ ) assert self.add_prefix_space or not is_split_into_words, ( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ ) def __A ( self , *UpperCAmelCase__ , **UpperCAmelCase__ ): A__ = kwargs.get("is_split_into_words" , UpperCAmelCase_ ) assert self.add_prefix_space or not is_split_into_words, ( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ ) def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ): A__ = self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_ ) return tuple(UpperCAmelCase_ ) def __A ( self , UpperCAmelCase__ , UpperCAmelCase__=None ): A__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ): A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
491
from ... import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
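# Editor's note: a hedged usage sketch for the config above. Inside the transformers
# source tree the relative import resolves on its own; with the released library one
# would use `from transformers import NezhaConfig` instead. Values are the defaults.
config = NezhaConfig(num_hidden_layers=6)  # override one default for illustration
assert config.model_type == "nezha" and config.hidden_size == 768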
343
0
'''simple docstring''' import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) lowerCamelCase__ = '\\n Text data.\n Second line of data.' lowerCamelCase__ = 'file' @pytest.fixture(scope='''session''' ) def _SCREAMING_SNAKE_CASE( snake_case_ : Dict ) ->str: '''simple docstring''' _lowercase : Tuple = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''') _lowercase : List[str] = bytes(snake_case_ , '''utf-8''' ) with zstd.open(snake_case_ , '''wb''' ) as f: f.write(snake_case_ ) return path @pytest.fixture def _SCREAMING_SNAKE_CASE( snake_case_ : str ) ->Any: '''simple docstring''' with open(os.path.join(tmpfs.local_root_dir , snake_case_ ) , '''w''' ) as f: f.write(snake_case_ ) return FILE_PATH @pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] ) def _SCREAMING_SNAKE_CASE( snake_case_ : Optional[Any] , snake_case_ : List[str] , snake_case_ : Optional[Any] , snake_case_ : Dict , snake_case_ : Union[str, Any] , snake_case_ : Dict ) ->Any: '''simple docstring''' _lowercase : str = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path} _lowercase : List[Any] = input_paths[compression_format] _lowercase : Tuple = tmp_path / '''cache''' _lowercase : List[str] = DownloadConfig(cache_dir=snake_case_ , extract_compressed_file=snake_case_ ) _lowercase : str = cached_path(snake_case_ , download_config=snake_case_ ) with open(snake_case_ ) as f: _lowercase : Union[str, Any] = f.read() with open(snake_case_ ) as f: _lowercase : str = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize('''default_extracted''' , [True, False] ) @pytest.mark.parametrize('''default_cache_dir''' , [True, False] ) def _SCREAMING_SNAKE_CASE( snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : Union[str, Any] , snake_case_ : int , snake_case_ : Dict ) ->Union[str, Any]: '''simple docstring''' _lowercase : Union[str, Any] = '''custom_cache''' _lowercase : Optional[int] = '''custom_extracted_dir''' _lowercase : Tuple = tmp_path / '''custom_extracted_path''' if default_extracted: _lowercase : Tuple = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''') else: monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , snake_case_ ) monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(snake_case_ ) ) _lowercase : Union[str, Any] = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) _lowercase : Optional[int] = xz_file _lowercase : List[Any] = ( DownloadConfig(extract_compressed_file=snake_case_ ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=snake_case_ ) ) _lowercase : Optional[int] = cached_path(snake_case_ , download_config=snake_case_ ) assert Path(snake_case_ ).parent.parts[-2:] == expected def _SCREAMING_SNAKE_CASE( snake_case_ : List[Any] ) ->Union[str, Any]: '''simple docstring''' _lowercase : Optional[Any] = str(Path(snake_case_ ).resolve() ) assert cached_path(snake_case_ ) == text_file # relative path _lowercase : int = str(Path(snake_case_ ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(snake_case_ ) == text_file def _SCREAMING_SNAKE_CASE( snake_case_ : Optional[int] ) ->int: 
'''simple docstring''' _lowercase : List[str] = str(tmp_path.resolve() / '''__missing_file__.txt''' ) with pytest.raises(snake_case_ ): cached_path(snake_case_ ) # relative path _lowercase : str = '''./__missing_file__.txt''' with pytest.raises(snake_case_ ): cached_path(snake_case_ ) def _SCREAMING_SNAKE_CASE( snake_case_ : List[str] ) ->str: '''simple docstring''' _lowercase : Optional[int] = get_from_cache(F"tmp://{tmpfs_file}" ) with open(snake_case_ ) as f: _lowercase : List[Any] = f.read() assert output_file_content == FILE_CONTENT @patch('''datasets.config.HF_DATASETS_OFFLINE''' , snake_case_ ) def _SCREAMING_SNAKE_CASE( ) ->int: '''simple docstring''' with pytest.raises(snake_case_ ): cached_path('''https://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , snake_case_ ) def _SCREAMING_SNAKE_CASE( snake_case_ : List[Any] ) ->int: '''simple docstring''' _lowercase : List[str] = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(snake_case_ ): http_get('''https://huggingface.co''' , temp_file=snake_case_ ) with pytest.raises(snake_case_ ): http_head('''https://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , snake_case_ ) def _SCREAMING_SNAKE_CASE( snake_case_ : Dict ) ->str: '''simple docstring''' _lowercase : Any = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(snake_case_ ): ftp_get('''ftp://huggingface.co''' , temp_file=snake_case_ ) with pytest.raises(snake_case_ ): ftp_head('''ftp://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , snake_case_ ) def _SCREAMING_SNAKE_CASE( snake_case_ : Optional[Any] ) ->str: '''simple docstring''' _lowercase : Tuple = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(snake_case_ ): fsspec_get('''s3://huggingface.co''' , temp_file=snake_case_ ) with pytest.raises(snake_case_ ): fsspec_head('''s3://huggingface.co''' )
715
def dodecahedron_surface_area(edge: float) -> float:
    """
    Calculates the surface area of a regular dodecahedron:
    a = 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (e ** 2)
    where a is the surface area and e is the length of the edge.
    """
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """
    Calculates the volume of a regular dodecahedron:
    v = ((15 + (7 * (5 ** (1 / 2)))) / 4) * (e ** 3)
    where v is the volume and e is the length of the edge.
    """
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
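# Editor's note: a short numeric check for the two formulas above, assuming they are
# defined in the same module. For edge length 1 the surface area is about 20.6457 and
# the volume about 7.6631, the standard values for a regular dodecahedron.
assert abs(dodecahedron_surface_area(1) - 20.6457) < 1e-3
assert abs(dodecahedron_volume(1) - 7.6631) < 1e-3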
411
0
import warnings
from pathlib import Path
from typing import List, Tuple, Union

import fire
from torch import nn

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging


logger = logging.get_logger(__name__)


def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())


LAYERS_TO_COPY = {
    # maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
    # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
    12: {
        1: [0],  # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
        2: [0, 6],
        3: [0, 6, 11],
        4: [0, 4, 8, 11],
        6: [0, 2, 4, 7, 9, 11],
        9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
        12: list(range(12)),
    },
    16: {  # maps num layers in student -> which teacher layers to copy
        1: [0],
        2: [0, 15],
        3: [0, 8, 15],
        4: [0, 5, 10, 15],
        6: [0, 3, 6, 9, 12, 15],
        8: [0, 2, 4, 6, 8, 10, 12, 15],
        9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
        12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
        16: list(range(16)),
    },
    6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
    # maps num layers in student -> which teacher layers to copy.
    6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
    12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
    16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}


def pick_layers_to_copy(n_student, n_teacher):
    try:
        return LAYERS_TO_COPY[n_teacher][n_student]
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))


def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    """Used for the --supervise_forward kwarg."""
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]


def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    # Save information about copying for easier reproducibility
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    return student, e_layers_to_copy, d_layers_to_copy


if __name__ == "__main__":
    fire.Fire(create_student_by_copying_alternating_layers)
551
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline else: from .pipeline_kandinsky import KandinskyPipeline from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput from .text_encoder import MultilingualCLIP
551
1
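To make the LAYERS_TO_COPY lookup in the student-distillation row above concrete, here is a small self-contained sketch of the selection-plus-fallback logic (the table is a subset copied from the row; the helper name pick_layers is assumed for illustration):

import warnings

# Subset of the mapping above: teacher depth -> student depth -> teacher layers to copy.
LAYERS_TO_COPY = {12: {1: [0], 2: [0, 6], 3: [0, 6, 11], 6: [0, 2, 4, 7, 9, 11]}}

def pick_layers(n_teacher: int, n_student: int) -> list:
    try:
        return LAYERS_TO_COPY[n_teacher][n_student]
    except KeyError:
        # No hardcoded selection: default to the first n_student teacher layers.
        warnings.warn(f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}")
        return list(range(n_student))

assert pick_layers(12, 3) == [0, 6, 11]  # keeps the first, middle and last layers
assert pick_layers(10, 2) == [0, 1]      # no table entry -> first two layers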
"""simple docstring""" from __future__ import annotations _lowerCamelCase = 8.988e9 # units = N * m^s * C^-2 def lowerCAmelCase_ ( lowercase_ : float , lowercase_ : float , lowercase_ : float , lowercase_ : float ): '''simple docstring''' __SCREAMING_SNAKE_CASE : Dict = abs(chargea * chargea ) if (force, chargea, chargea, distance).count(0 ) != 1: raise ValueError('''One and only one argument must be 0''' ) if distance < 0: raise ValueError('''Distance cannot be negative''' ) if force == 0: __SCREAMING_SNAKE_CASE : int = COULOMBS_CONSTANT * charge_product / (distance**2) return {"force": force} elif chargea == 0: __SCREAMING_SNAKE_CASE : List[str] = abs(lowercase_ ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge1": chargea} elif chargea == 0: __SCREAMING_SNAKE_CASE : Optional[int] = abs(lowercase_ ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge2": chargea} elif distance == 0: __SCREAMING_SNAKE_CASE : Tuple = (COULOMBS_CONSTANT * charge_product / abs(lowercase_ )) ** 0.5 return {"distance": distance} raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
711
"""simple docstring""" import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCAmelCase_ ( lowercase_ : List[str] , lowercase_ : Dict ): '''simple docstring''' assert isinstance(lowercase_ , lowercase_ ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def lowerCAmelCase_ ( lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : Any ): '''simple docstring''' __SCREAMING_SNAKE_CASE : Any = tmp_path / '''cache''' __SCREAMING_SNAKE_CASE : Optional[Any] = {'''text''': '''string'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __SCREAMING_SNAKE_CASE : int = TextDatasetReader(lowercase_ , cache_dir=lowercase_ , keep_in_memory=lowercase_ ).read() _check_text_dataset(lowercase_ , lowercase_ ) @pytest.mark.parametrize( '''features''' , [ None, {'''text''': '''string'''}, {'''text''': '''int32'''}, {'''text''': '''float32'''}, ] , ) def lowerCAmelCase_ ( lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : Optional[int] ): '''simple docstring''' __SCREAMING_SNAKE_CASE : str = tmp_path / '''cache''' __SCREAMING_SNAKE_CASE : Any = {'''text''': '''string'''} __SCREAMING_SNAKE_CASE : Tuple = features.copy() if features else default_expected_features __SCREAMING_SNAKE_CASE : int = ( Features({feature: Value(lowercase_ ) for feature, dtype in features.items()} ) if features is not None else None ) __SCREAMING_SNAKE_CASE : List[str] = TextDatasetReader(lowercase_ , features=lowercase_ , cache_dir=lowercase_ ).read() _check_text_dataset(lowercase_ , lowercase_ ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def lowerCAmelCase_ ( lowercase_ : List[Any] , lowercase_ : Optional[int] , lowercase_ : List[str] ): '''simple docstring''' __SCREAMING_SNAKE_CASE : Optional[Any] = tmp_path / '''cache''' __SCREAMING_SNAKE_CASE : Any = {'''text''': '''string'''} __SCREAMING_SNAKE_CASE : Optional[int] = TextDatasetReader(lowercase_ , cache_dir=lowercase_ , split=lowercase_ ).read() _check_text_dataset(lowercase_ , lowercase_ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''' , [str, list] ) def lowerCAmelCase_ ( lowercase_ : str , lowercase_ : Dict , lowercase_ : Optional[Any] ): '''simple docstring''' if issubclass(lowercase_ , lowercase_ ): __SCREAMING_SNAKE_CASE : str = text_path elif issubclass(lowercase_ , lowercase_ ): __SCREAMING_SNAKE_CASE : Any = [text_path] __SCREAMING_SNAKE_CASE : List[Any] = tmp_path / '''cache''' __SCREAMING_SNAKE_CASE : List[Any] = {'''text''': '''string'''} __SCREAMING_SNAKE_CASE : List[Any] = TextDatasetReader(lowercase_ , cache_dir=lowercase_ ).read() _check_text_dataset(lowercase_ , lowercase_ ) def lowerCAmelCase_ ( lowercase_ : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any]=("train",) ): '''simple docstring''' assert isinstance(lowercase_ , lowercase_ ) for split in splits: __SCREAMING_SNAKE_CASE : List[str] = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert 
dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def lowerCAmelCase_ ( lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : str ): '''simple docstring''' __SCREAMING_SNAKE_CASE : int = tmp_path / '''cache''' __SCREAMING_SNAKE_CASE : Optional[int] = {'''text''': '''string'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __SCREAMING_SNAKE_CASE : Optional[Any] = TextDatasetReader({'''train''': text_path} , cache_dir=lowercase_ , keep_in_memory=lowercase_ ).read() _check_text_datasetdict(lowercase_ , lowercase_ ) @pytest.mark.parametrize( '''features''' , [ None, {'''text''': '''string'''}, {'''text''': '''int32'''}, {'''text''': '''float32'''}, ] , ) def lowerCAmelCase_ ( lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE : Tuple = tmp_path / '''cache''' # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" __SCREAMING_SNAKE_CASE : str = {'''text''': '''string'''} __SCREAMING_SNAKE_CASE : Optional[Any] = features.copy() if features else default_expected_features __SCREAMING_SNAKE_CASE : Dict = ( Features({feature: Value(lowercase_ ) for feature, dtype in features.items()} ) if features is not None else None ) __SCREAMING_SNAKE_CASE : List[Any] = TextDatasetReader({'''train''': text_path} , features=lowercase_ , cache_dir=lowercase_ ).read() _check_text_datasetdict(lowercase_ , lowercase_ ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def lowerCAmelCase_ ( lowercase_ : int , lowercase_ : str , lowercase_ : Optional[Any] ): '''simple docstring''' if split: __SCREAMING_SNAKE_CASE : Tuple = {split: text_path} else: __SCREAMING_SNAKE_CASE : Union[str, Any] = '''train''' __SCREAMING_SNAKE_CASE : List[str] = {'''train''': text_path, '''test''': text_path} __SCREAMING_SNAKE_CASE : Any = tmp_path / '''cache''' __SCREAMING_SNAKE_CASE : List[str] = {'''text''': '''string'''} __SCREAMING_SNAKE_CASE : Optional[Any] = TextDatasetReader(lowercase_ , cache_dir=lowercase_ ).read() _check_text_datasetdict(lowercase_ , lowercase_ , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
401
0
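A quick hypothetical check of the Coulomb's-law solver above, restating only its force branch (F = k * |q1 * q2| / d**2) with the corrected constant units:

COULOMBS_CONSTANT = 8.988e9  # N * m^2 * C^-2

def force_between(chargea: float, chargeb: float, distance: float) -> float:
    # Force branch of the solver above.
    if distance <= 0:
        raise ValueError("Distance must be positive")
    return COULOMBS_CONSTANT * abs(chargea * chargeb) / distance**2

# Two 1 C charges 1 m apart feel exactly k newtons; doubling the distance quarters the force.
assert force_between(1.0, 1.0, 1.0) == 8.988e9
assert abs(force_between(1.0, 1.0, 2.0) - 8.988e9 / 4) < 1e-6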
'''simple docstring''' from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging lowercase =logging.get_logger(__name__) class __magic_name__ ( lowerCAmelCase ): UpperCAmelCase =["input_features", "attention_mask"] def __init__( self , snake_case=8_0 , snake_case=1_6_0_0_0 , snake_case=8_0 , snake_case=0.0 , snake_case=True , snake_case=True , snake_case=True , **snake_case , ) -> str: '''simple docstring''' super().__init__(feature_size=snake_case , sampling_rate=snake_case , padding_value=snake_case , **snake_case) _UpperCAmelCase : Optional[Any] =num_mel_bins _UpperCAmelCase : Optional[int] =do_ceptral_normalize _UpperCAmelCase : Optional[Any] =normalize_means _UpperCAmelCase : Optional[Any] =normalize_vars _UpperCAmelCase : Tuple =True def lowerCAmelCase ( self , snake_case , ) -> np.ndarray: '''simple docstring''' _UpperCAmelCase : List[Any] =waveform * (2**1_5) # Kaldi compliance: 16-bit signed integers _UpperCAmelCase : Dict =torch.from_numpy(snake_case).unsqueeze(0) _UpperCAmelCase : Union[str, Any] =ta_kaldi.fbank(snake_case , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate) return features.numpy() @staticmethod def lowerCAmelCase ( snake_case , snake_case , snake_case = True , snake_case = True , snake_case = 0.0 , ) -> np.ndarray: '''simple docstring''' # make sure we normalize float32 arrays if normalize_means: _UpperCAmelCase : int =x[:input_length].mean(axis=0) _UpperCAmelCase : List[Any] =np.subtract(snake_case , snake_case) if normalize_vars: _UpperCAmelCase : List[Any] =x[:input_length].std(axis=0) _UpperCAmelCase : Optional[int] =np.divide(snake_case , snake_case) if input_length < x.shape[0]: _UpperCAmelCase : Dict =padding_value # make sure array is in float32 _UpperCAmelCase : str =x.astype(np.floataa) return x def lowerCAmelCase ( self , snake_case , snake_case = None) -> List[np.ndarray]: '''simple docstring''' _UpperCAmelCase : str =attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(snake_case , snake_case , self.normalize_means , self.normalize_vars , self.padding_value) for x, n in zip(snake_case , snake_case) ] def __call__( self , snake_case , snake_case = False , snake_case = None , snake_case = False , snake_case = None , snake_case = None , snake_case = None , snake_case = None , **snake_case , ) -> BatchFeature: '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of" f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with" f" {self.sampling_rate} and not {sampling_rate}.") else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. 
' 'Failing to do so can result in silent errors that might be hard to debug.') _UpperCAmelCase : Optional[int] =isinstance(snake_case , np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(f"Only mono-channel audio is supported for input to {self}") _UpperCAmelCase : List[Any] =is_batched_numpy or ( isinstance(snake_case , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list))) ) if is_batched: _UpperCAmelCase : int =[np.asarray(snake_case , dtype=np.floataa) for speech in raw_speech] elif not is_batched and not isinstance(snake_case , np.ndarray): _UpperCAmelCase : Tuple =np.asarray(snake_case , dtype=np.floataa) elif isinstance(snake_case , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa): _UpperCAmelCase : int =raw_speech.astype(np.floataa) # always return batch if not is_batched: _UpperCAmelCase : Dict =[raw_speech] # extract fbank features _UpperCAmelCase : Optional[Any] =[self._extract_fbank_features(snake_case) for waveform in raw_speech] # convert into correct format for padding _UpperCAmelCase : List[str] =BatchFeature({'input_features': features}) _UpperCAmelCase : Any =self.pad( snake_case , padding=snake_case , max_length=snake_case , truncation=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , **snake_case , ) # make sure list is in array format _UpperCAmelCase : Dict =padded_inputs.get('input_features') if isinstance(input_features[0] , snake_case): _UpperCAmelCase : Any =[np.asarray(snake_case , dtype=np.floataa) for feature in input_features] _UpperCAmelCase : int =padded_inputs.get('attention_mask') if attention_mask is not None: _UpperCAmelCase : Tuple =[np.asarray(snake_case , dtype=np.intaa) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: _UpperCAmelCase : Optional[Any] =( np.array(snake_case , dtype=np.intaa) if self._get_padding_strategies(snake_case , max_length=snake_case) is not PaddingStrategy.DO_NOT_PAD else None ) _UpperCAmelCase : Optional[int] =self.normalize( padded_inputs['input_features'] , attention_mask=snake_case) if return_tensors is not None: _UpperCAmelCase : List[Any] =padded_inputs.convert_to_tensors(snake_case) return padded_inputs
446
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase ={ 'configuration_table_transformer': [ 'TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TableTransformerConfig', 'TableTransformerOnnxConfig', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase =[ 'TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TableTransformerForObjectDetection', 'TableTransformerModel', 'TableTransformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_table_transformer import ( TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TableTransformerConfig, TableTransformerOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_table_transformer import ( TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TableTransformerForObjectDetection, TableTransformerModel, TableTransformerPreTrainedModel, ) else: import sys lowercase =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
446
1
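The Speech2Text feature-extractor row above applies utterance-level cepstral mean and variance normalization (CMVN) to the un-padded frames before padding; a minimal numpy sketch of that step (the 1e-10 epsilon is an added assumption for numerical safety; the original divides by the raw std):

import numpy as np

def utterance_cmvn(x: np.ndarray, input_length: int, padding_value: float = 0.0) -> np.ndarray:
    # Normalize only the un-padded frames, per feature dimension.
    x = x.astype(np.float32)
    x[:input_length] -= x[:input_length].mean(axis=0)
    x[:input_length] /= x[:input_length].std(axis=0) + 1e-10
    # Overwrite padded frames with the padding value.
    if input_length < x.shape[0]:
        x[input_length:] = padding_value
    return x

feats = utterance_cmvn(np.random.randn(20, 80), input_length=16)
assert abs(float(feats[:16].mean())) < 1e-4  # normalized frames are zero-mean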
import re import tempfile from pathlib import Path import pytest import yaml from datasets.utils.readme import ReadMe # @pytest.fixture # def example_yaml_structure(): _snake_case = yaml.safe_load( '''\ name: "" allow_empty: false allow_empty_text: true subsections: - name: "Dataset Card for X" # First-level markdown heading allow_empty: false allow_empty_text: true subsections: - name: "Table of Contents" allow_empty: false allow_empty_text: false subsections: null - name: "Dataset Description" allow_empty: false allow_empty_text: false subsections: - name: "Dataset Summary" allow_empty: false allow_empty_text: false subsections: null - name: "Supported Tasks and Leaderboards" allow_empty: true allow_empty_text: true subsections: null - name: Languages allow_empty: false allow_empty_text: true subsections: null ''' ) _snake_case = { '''name''': '''root''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ { '''name''': '''Dataset Card for My Dataset''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ {'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []}, { '''name''': '''Dataset Description''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Dataset Summary''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [], }, { '''name''': '''Supported Tasks and Leaderboards''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], }, {'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []}, ], }, ], } ], } _snake_case = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' _snake_case = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. #### Extra Ignored Subsection ### Supported Tasks and Leaderboards ### Languages Language Text ''' _snake_case = { '''name''': '''root''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ { '''name''': '''Dataset Card for My Dataset''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [ {'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []}, { '''name''': '''Dataset Description''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Dataset Summary''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': [ { '''name''': '''Extra Ignored Subsection''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], } ], }, { '''name''': '''Supported Tasks and Leaderboards''', '''text''': '''''', '''is_empty_text''': True, '''subsections''': [], }, {'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []}, ], }, ], } ], } _snake_case = '''\ --- --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. 
### Supported Tasks and Leaderboards ### Languages Language Text ''' _snake_case = ( '''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.''' ) _snake_case = '''\ # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' _snake_case = ( '''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.''' ) _snake_case = '''\ --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' _snake_case = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.''' _snake_case = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary ### Supported Tasks and Leaderboards ### Languages Language Text ''' _snake_case = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).''' _snake_case = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ''' _snake_case = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.''' _snake_case = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Languages Language Text ''' _snake_case = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.''' _snake_case = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages ''' _snake_case = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.''' _snake_case = '''\ --- language: - zh - en --- ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' _snake_case = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.''' _snake_case = '''\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text # Dataset Card My Dataset ''' _snake_case = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. 
Skipping further validation for this README.''' _snake_case = '''\ --- language: - zh - en --- # Dataset Card My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' _snake_case = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.''' _snake_case = '''''' _snake_case = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.''' _snake_case = '''\ --- language: - zh - en --- # Dataset Card for My Dataset # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text ''' _snake_case = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.''' @pytest.mark.parametrize( 'readme_md, expected_dict' , [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] , ) def __lowerCamelCase ( _lowercase , _lowercase ) -> Tuple: assert ReadMe.from_string(UpperCAmelCase__ , UpperCAmelCase__ ).to_dict() == expected_dict @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] , ) def __lowerCamelCase ( _lowercase , _lowercase ) -> str: with pytest.raises(UpperCAmelCase__ , match=re.escape(expected_error.format(path='root' ) ) ): UpperCamelCase = ReadMe.from_string(UpperCAmelCase__ , UpperCAmelCase__ ) readme.validate() @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] , ) def __lowerCamelCase ( _lowercase , _lowercase ) -> Any: with pytest.raises(UpperCAmelCase__ , match=re.escape(expected_error.format(path='root' ) ) ): ReadMe.from_string(UpperCAmelCase__ , UpperCAmelCase__ ) @pytest.mark.parametrize( 'readme_md,' , [ (README_MULTIPLE_SAME_HEADING_1), ] , ) def __lowerCamelCase ( _lowercase ) -> List[Any]: ReadMe.from_string(UpperCAmelCase__ , UpperCAmelCase__ , suppress_parsing_errors=UpperCAmelCase__ ) @pytest.mark.parametrize( 'readme_md, expected_dict' , [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] , ) def __lowerCamelCase ( _lowercase , _lowercase ) -> List[str]: with tempfile.TemporaryDirectory() as tmp_dir: UpperCamelCase = Path(UpperCAmelCase__ ) / 'README.md' with open(UpperCAmelCase__ , 'w+' ) as readme_file: 
readme_file.write(UpperCAmelCase__ ) UpperCamelCase = ReadMe.from_readme(UpperCAmelCase__ , UpperCAmelCase__ ).to_dict() assert out["name"] == path assert out["text"] == "" assert out["is_empty_text"] assert out["subsections"] == expected_dict["subsections"] @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] , ) def __lowerCamelCase ( _lowercase , _lowercase ) -> Any: with tempfile.TemporaryDirectory() as tmp_dir: UpperCamelCase = Path(UpperCAmelCase__ ) / 'README.md' with open(UpperCAmelCase__ , 'w+' ) as readme_file: readme_file.write(UpperCAmelCase__ ) UpperCamelCase = expected_error.format(path=UpperCAmelCase__ ) with pytest.raises(UpperCAmelCase__ , match=re.escape(UpperCAmelCase__ ) ): UpperCamelCase = ReadMe.from_readme(UpperCAmelCase__ , UpperCAmelCase__ ) readme.validate() @pytest.mark.parametrize( 'readme_md, expected_error' , [ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] , ) def __lowerCamelCase ( _lowercase , _lowercase ) -> Any: with tempfile.TemporaryDirectory() as tmp_dir: UpperCamelCase = Path(UpperCAmelCase__ ) / 'README.md' with open(UpperCAmelCase__ , 'w+' ) as readme_file: readme_file.write(UpperCAmelCase__ ) UpperCamelCase = expected_error.format(path=UpperCAmelCase__ ) with pytest.raises(UpperCAmelCase__ , match=re.escape(UpperCAmelCase__ ) ): ReadMe.from_readme(UpperCAmelCase__ , UpperCAmelCase__ ) @pytest.mark.parametrize( 'readme_md,' , [ (README_MULTIPLE_SAME_HEADING_1), ] , ) def __lowerCamelCase ( _lowercase ) -> Tuple: with tempfile.TemporaryDirectory() as tmp_dir: UpperCamelCase = Path(UpperCAmelCase__ ) / 'README.md' with open(UpperCAmelCase__ , 'w+' ) as readme_file: readme_file.write(UpperCAmelCase__ ) ReadMe.from_readme(UpperCAmelCase__ , UpperCAmelCase__ , suppress_parsing_errors=UpperCAmelCase__ )
710
def z_function ( input_str : str ) -> list[int]: z_result = [0 for i in range(len(input_str ) )] # initialize interval's left pointer and right pointer left_pointer , right_pointer = 0, 0 for i in range(1 , len(input_str ) ): # case when current index is inside the interval if i <= right_pointer: min_edge = min(right_pointer - i + 1 , z_result[i - left_pointer] ) z_result[i] = min_edge while go_next(i , z_result , input_str ): z_result[i] += 1 # if new index's result gives us more right interval, # we've to update left_pointer and right_pointer if i + z_result[i] - 1 > right_pointer: left_pointer , right_pointer = i, i + z_result[i] - 1 return z_result def go_next ( i : int , z_result : list[int] , s : str ) -> bool: return i + z_result[i] < len(s ) and s[z_result[i]] == s[i + z_result[i]] def find_pattern ( pattern : str , input_str : str ) -> int: answer = 0 # concatenate 'pattern' and 'input_str' and call z_function # with concatenated string z_result = z_function(pattern + input_str ) for val in z_result: # if value is greater than length of the pattern string # that means this index is starting position of substring # which is equal to pattern string if val >= len(pattern ): answer += 1 return answer if __name__ == "__main__": import doctest doctest.testmod()
170
0
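The Z-function row above counts pattern occurrences by running the Z-function over pattern + input_str; a compact, self-contained sketch with a worked example (function names mirror the call sites in that row):

def z_function(input_str: str) -> list:
    # z_result[i] = length of the longest substring starting at i that is also a prefix.
    z_result = [0] * len(input_str)
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        if i <= right_pointer:
            z_result[i] = min(right_pointer - i + 1, z_result[i - left_pointer])
        while i + z_result[i] < len(input_str) and input_str[z_result[i]] == input_str[i + z_result[i]]:
            z_result[i] += 1
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result

def find_pattern(pattern: str, input_str: str) -> int:
    # Every z-value >= len(pattern) marks an index where the pattern starts.
    return sum(val >= len(pattern) for val in z_function(pattern + input_str))

assert z_function("abracadabra") == [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
assert find_pattern("abra", "abracadabra") == 2  # matches at offsets 0 and 7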
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 _a : Any = get_tests_dir("fixtures") class _lowercase ( unittest.TestCase ): def a ( self : str ) -> Dict: # A mock response for an HTTP head request to emulate server down __snake_case = mock.Mock() __snake_case = 500 __snake_case = {} __snake_case = HTTPError __snake_case = {} # Download this model to make sure it's in the cache. __snake_case = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('requests.Session.request' , return_value=SCREAMING_SNAKE_CASE_ ) as mock_head: __snake_case = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' ) # This check we did call the fake head request mock_head.assert_called() def a ( self : Dict ) -> Optional[Any]: # This test is for deprecated behavior and can be removed in v5 __snake_case = WavaVecaFeatureExtractor.from_pretrained( 'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' ) @is_staging_test class _lowercase ( unittest.TestCase ): @classmethod def a ( cls : Any ) -> int: __snake_case = TOKEN HfFolder.save_token(SCREAMING_SNAKE_CASE_ ) @classmethod def a ( cls : Optional[int] ) -> Union[str, Any]: try: delete_repo(token=cls._token , repo_id='test-feature-extractor' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='valid_org/test-feature-extractor-org' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='test-dynamic-feature-extractor' ) except HTTPError: pass def a ( self : Optional[Any] ) -> List[Any]: __snake_case = WavaVecaFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ ) feature_extractor.push_to_hub('test-feature-extractor' , use_auth_token=self._token ) __snake_case = WavaVecaFeatureExtractor.from_pretrained(f'{USER}/test-feature-extractor' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) # Reset repo delete_repo(token=self._token , repo_id='test-feature-extractor' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( SCREAMING_SNAKE_CASE_ , repo_id='test-feature-extractor' , push_to_hub=SCREAMING_SNAKE_CASE_ , use_auth_token=self._token ) __snake_case = WavaVecaFeatureExtractor.from_pretrained(f'{USER}/test-feature-extractor' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) def a ( self : Any ) -> Optional[int]: __snake_case = WavaVecaFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ ) feature_extractor.push_to_hub('valid_org/test-feature-extractor' , use_auth_token=self._token ) __snake_case = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) 
) # Reset repo delete_repo(token=self._token , repo_id='valid_org/test-feature-extractor' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( SCREAMING_SNAKE_CASE_ , repo_id='valid_org/test-feature-extractor-org' , push_to_hub=SCREAMING_SNAKE_CASE_ , use_auth_token=self._token ) __snake_case = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) def a ( self : Any ) -> Optional[Any]: CustomFeatureExtractor.register_for_auto_class() __snake_case = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ ) feature_extractor.push_to_hub('test-dynamic-feature-extractor' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( feature_extractor.auto_map , {'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'} , ) __snake_case = AutoFeatureExtractor.from_pretrained( f'{USER}/test-dynamic-feature-extractor' , trust_remote_code=SCREAMING_SNAKE_CASE_ ) # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module self.assertEqual(new_feature_extractor.__class__.__name__ , 'CustomFeatureExtractor' )
56
'''simple docstring''' import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin _a : int = get_tests_dir("fixtures/test_sentencepiece.model") _a : Dict = {"target_lang": "fi", "source_lang": "en"} _a : Optional[int] = ">>zh<<" _a : List[str] = "Helsinki-NLP/" if is_torch_available(): _a : List[str] = "pt" elif is_tf_available(): _a : Dict = "tf" else: _a : Union[str, Any] = "jax" @require_sentencepiece class _lowercase ( __lowercase , unittest.TestCase ): _SCREAMING_SNAKE_CASE : int = MarianTokenizer _SCREAMING_SNAKE_CASE : str = False _SCREAMING_SNAKE_CASE : Union[str, Any] = True def a ( self : int ) -> int: super().setUp() __snake_case = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>'] __snake_case = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) __snake_case = Path(self.tmpdirname ) save_json(SCREAMING_SNAKE_CASE_ , save_dir / VOCAB_FILES_NAMES['vocab'] ) save_json(SCREAMING_SNAKE_CASE_ , save_dir / VOCAB_FILES_NAMES['tokenizer_config_file'] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(SCREAMING_SNAKE_CASE_ , save_dir / VOCAB_FILES_NAMES['source_spm'] ) copyfile(SCREAMING_SNAKE_CASE_ , save_dir / VOCAB_FILES_NAMES['target_spm'] ) __snake_case = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def a ( self : int , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> MarianTokenizer: return MarianTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def a ( self : str , SCREAMING_SNAKE_CASE_ : List[str] ) -> List[Any]: return ( "This is a test", "This is a test", ) def a ( self : int ) -> Optional[Any]: __snake_case = '</s>' __snake_case = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) def a ( self : Dict ) -> List[str]: __snake_case = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '</s>' ) self.assertEqual(vocab_keys[1] , '<unk>' ) self.assertEqual(vocab_keys[-1] , '<pad>' ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 9 ) def a ( self : List[Any] ) -> str: self.assertEqual(self.get_tokenizer().vocab_size , 9 ) def a ( self : Any ) -> Optional[int]: __snake_case = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de' ) __snake_case = en_de_tokenizer(['I am a small frog'] , return_tensors=SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __snake_case = [38, 121, 14, 697, 3_8848, 0] self.assertListEqual(SCREAMING_SNAKE_CASE_ , batch.input_ids[0] ) __snake_case = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ ) __snake_case = [x.name for x in Path(SCREAMING_SNAKE_CASE_ ).glob('*' )] self.assertIn('source.spm' , SCREAMING_SNAKE_CASE_ ) MarianTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ) def a ( self : Optional[int] ) -> Any: __snake_case = self.get_tokenizer() __snake_case = tok( ['I am a small frog' * 1000, 'I am a small frog'] , 
padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(batch.input_ids.shape , (2, 512) ) def a ( self : Tuple ) -> Dict: __snake_case = self.get_tokenizer() __snake_case = tok(['I am a tiny frog', 'I am a small frog'] , padding=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(batch_smaller.input_ids.shape , (2, 10) ) @slow def a ( self : int ) -> int: # fmt: off __snake_case = {'input_ids': [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=SCREAMING_SNAKE_CASE_ , model_name='Helsinki-NLP/opus-mt-en-de' , revision='1a8c2263da11e68e50938f97e10cd57820bd504c' , decode_kwargs={'use_source_tokenizer': True} , ) def a ( self : Dict ) -> str: __snake_case = MarianTokenizer.from_pretrained('hf-internal-testing/test-marian-two-vocabs' ) __snake_case = 'Tämä on testi' __snake_case = 'This is a test' __snake_case = [76, 7, 2047, 2] __snake_case = [69, 12, 11, 940, 2] __snake_case = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __snake_case = tokenizer(text_target=SCREAMING_SNAKE_CASE_ ).input_ids self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __snake_case = tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
56
1
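The hub tests in the record above simulate a server outage by patching requests.Session.request; a standalone sketch of that mocking pattern (the fetch helper is hypothetical):

import unittest.mock as mock
import requests

def fetch(url: str) -> int:
    return requests.Session().request("HEAD", url).status_code

# Patch the low-level request so every HTTP call appears to hit a 500.
response_mock = mock.Mock()
response_mock.status_code = 500
with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
    assert fetch("https://huggingface.co") == 500
mock_head.assert_called()  # proves the fake head request was actually made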
"""simple docstring""" import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer a_ = logging.get_logger(__name__) a_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} a_ = { "vocab_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json", }, "merges_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt", }, "tokenizer_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json", }, } a_ = { "allenai/led-base-16384": 1_6_3_8_4, } class UpperCAmelCase_ ( lowercase__ ): UpperCamelCase =VOCAB_FILES_NAMES UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase =LEDTokenizer UpperCamelCase =["input_ids", "attention_mask"] def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_="replace" , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_=False , UpperCamelCase_=True , **UpperCamelCase_ , ) -> Tuple: super().__init__( __lowerCamelCase , __lowerCamelCase , tokenizer_file=__lowerCamelCase , errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase , **__lowerCamelCase , ) __lowercase : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , __lowerCamelCase ) != add_prefix_space: __lowercase : List[str] = getattr(__lowerCamelCase , pre_tok_state.pop('''type''' ) ) __lowercase : Tuple = add_prefix_space __lowercase : str = pre_tok_class(**__lowerCamelCase ) __lowercase : List[str] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` __lowercase : Dict = "post_processor" __lowercase : List[Any] = getattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase ) if tokenizer_component_instance: __lowercase : str = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: __lowercase : List[str] = tuple(state['''sep'''] ) if "cls" in state: __lowercase : List[str] = tuple(state['''cls'''] ) __lowercase : str = False if state.get('''add_prefix_space''' , __lowerCamelCase ) != add_prefix_space: __lowercase : Any = add_prefix_space __lowercase : Dict = True if state.get('''trim_offsets''' , __lowerCamelCase ) != trim_offsets: __lowercase : int = trim_offsets __lowercase : Any = True if changes_to_apply: __lowercase : Union[str, Any] = getattr(__lowerCamelCase , state.pop('''type''' ) ) __lowercase : Union[str, Any] = component_class(**__lowerCamelCase ) setattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with 
BART->LED def _lowerCamelCase ( self ) -> str: if self._mask_token is None: if self.verbose: logger.error('''Using mask_token, but it is not set yet.''' ) return None return str(self._mask_token ) @mask_token.setter def _lowerCamelCase ( self , UpperCamelCase_ ) -> List[str]: __lowercase : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else value __lowercase : Optional[Any] = value def _lowerCamelCase ( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> str: __lowercase : Optional[Any] = kwargs.get('''is_split_into_words''' , __lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ '''to use it with pretokenized inputs.''' ) return super()._batch_encode_plus(*__lowerCamelCase , **__lowerCamelCase ) def _lowerCamelCase ( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> List[Any]: __lowercase : List[Any] = kwargs.get('''is_split_into_words''' , __lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ '''to use it with pretokenized inputs.''' ) return super()._encode_plus(*__lowerCamelCase , **__lowerCamelCase ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> Any: __lowercase : Dict = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase ) return tuple(__lowerCamelCase ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_=None ) -> Optional[int]: __lowercase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> Dict: __lowercase : Optional[Any] = [self.sep_token_id] __lowercase : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = PaddingStrategy.DO_NOT_PAD , UpperCamelCase_ = None , UpperCamelCase_ = None , ) -> Union[str, Any]: __lowercase : Optional[Any] = super()._pad( encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , ) # Load from model defaults if return_attention_mask is None: __lowercase : Optional[Any] = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: __lowercase : Any = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
__lowercase : int = len(encoded_inputs['''global_attention_mask'''] ) != len(__lowerCamelCase ) if needs_to_be_padded: __lowercase : Any = len(__lowerCamelCase ) - len(encoded_inputs['''global_attention_mask'''] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` __lowercase : Optional[Any] = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": __lowercase : Dict = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) ) return encoded_inputs
705
"""simple docstring""" import copy import random from transformers import CLIPTokenizer class UpperCAmelCase_ ( snake_case ): def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Optional[Any]: super().__init__(*UpperCamelCase_ , **UpperCamelCase_ ) __lowercase : int = {} def _lowerCamelCase ( self , UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ ) -> Optional[int]: __lowercase : Optional[int] = super().add_tokens(UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ ) if num_added_tokens == 0: raise ValueError( F"""The tokenizer already contains the token {placeholder_token}. Please pass a different""" ''' `placeholder_token` that is not already in the tokenizer.''' ) def _lowerCamelCase ( self , UpperCamelCase_ , *UpperCamelCase_ , UpperCamelCase_=1 , **UpperCamelCase_ ) -> Optional[Any]: __lowercase : Union[str, Any] = [] if num_vec_per_token == 1: self.try_adding_tokens(UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ ) output.append(UpperCamelCase_ ) else: __lowercase : List[Any] = [] for i in range(UpperCamelCase_ ): __lowercase : List[str] = placeholder_token + F"""_{i}""" self.try_adding_tokens(UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ ) output.append(UpperCamelCase_ ) # handle cases where there is a new placeholder token that contains the current placeholder token but is larger for token in self.token_map: if token in placeholder_token: raise ValueError( F"""The tokenizer already has placeholder token {token} that can get confused with""" F""" {placeholder_token}keep placeholder tokens independent""" ) __lowercase : int = output def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_=False , UpperCamelCase_=1.0 ) -> Tuple: if isinstance(UpperCamelCase_ , UpperCamelCase_ ): __lowercase : Optional[Any] = [] for i in range(len(UpperCamelCase_ ) ): output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=UpperCamelCase_ ) ) return output for placeholder_token in self.token_map: if placeholder_token in text: __lowercase : List[Any] = self.token_map[placeholder_token] __lowercase : Optional[Any] = tokens[: 1 + int(len(UpperCamelCase_ ) * prop_tokens_to_load )] if vector_shuffle: __lowercase : int = copy.copy(UpperCamelCase_ ) random.shuffle(UpperCamelCase_ ) __lowercase : Tuple = text.replace(UpperCamelCase_ , ''' '''.join(UpperCamelCase_ ) ) return text def __call__( self , UpperCamelCase_ , *UpperCamelCase_ , UpperCamelCase_=False , UpperCamelCase_=1.0 , **UpperCamelCase_ ) -> Optional[Any]: return super().__call__( self.replace_placeholder_tokens_in_text( UpperCamelCase_ , vector_shuffle=UpperCamelCase_ , prop_tokens_to_load=UpperCamelCase_ ) , *UpperCamelCase_ , **UpperCamelCase_ , ) def _lowerCamelCase ( self , UpperCamelCase_ , *UpperCamelCase_ , UpperCamelCase_=False , UpperCamelCase_=1.0 , **UpperCamelCase_ ) -> int: return super().encode( self.replace_placeholder_tokens_in_text( UpperCamelCase_ , vector_shuffle=UpperCamelCase_ , prop_tokens_to_load=UpperCamelCase_ ) , *UpperCamelCase_ , **UpperCamelCase_ , )
523
0
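A hypothetical sketch of the global_attention_mask padding rule implemented in the LED tokenizer row above: padded positions are filled with -1, since 0 already means "local attention" rather than "do not attend":

def pad_global_attention_mask(mask: list, target_len: int, padding_side: str = "right") -> list:
    # Mirror of the rule above: -1 marks padded (local-attention) positions.
    difference = target_len - len(mask)
    if padding_side == "right":
        return mask + [-1] * difference
    if padding_side == "left":
        return [-1] * difference + mask
    raise ValueError("Invalid padding strategy:" + str(padding_side))

assert pad_global_attention_mask([1, 0, 0], 5) == [1, 0, 0, -1, -1]
assert pad_global_attention_mask([1, 0], 4, padding_side="left") == [-1, -1, 1, 0]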
import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Optional[int]: snake_case__ , snake_case__ = image.size snake_case__ , snake_case__ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 snake_case__ = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) snake_case__ = np.array(__lowerCAmelCase ).astype(np.floataa ) / 255.0 snake_case__ = image[None].transpose(0 , 3 , 1 , 2 ) snake_case__ = torch.from_numpy(__lowerCAmelCase ) return 2.0 * image - 1.0 class __magic_name__ (snake_case_ ): '''simple docstring''' def __init__( self:List[str] , _a:VQModel , _a:UNetaDModel , _a:Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ] , ): super().__init__() self.register_modules(vqvae=_a , unet=_a , scheduler=_a ) @torch.no_grad() def __call__( self:int , _a:Union[torch.Tensor, PIL.Image.Image] = None , _a:Optional[int] = 1 , _a:Optional[int] = 1_00 , _a:Optional[float] = 0.0 , _a:Optional[Union[torch.Generator, List[torch.Generator]]] = None , _a:Optional[str] = "pil" , _a:bool = True , ): if isinstance(_a , PIL.Image.Image ): snake_case__ = 1 elif isinstance(_a , torch.Tensor ): snake_case__ = image.shape[0] else: raise ValueError(F"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_a )}""" ) if isinstance(_a , PIL.Image.Image ): snake_case__ = preprocess(_a ) snake_case__ , snake_case__ = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image snake_case__ = (batch_size, self.unet.config.in_channels // 2, height, width) snake_case__ = next(self.unet.parameters() ).dtype snake_case__ = randn_tensor(_a , generator=_a , device=self.device , dtype=_a ) snake_case__ = image.to(device=self.device , dtype=_a ) # set timesteps and move to the correct device self.scheduler.set_timesteps(_a , device=self.device ) snake_case__ = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler snake_case__ = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] snake_case__ = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) snake_case__ = {} if accepts_eta: snake_case__ = eta for t in self.progress_bar(_a ): # concat latents and low resolution image in the channel dimension. 
snake_case__ = torch.cat([latents, image] , dim=1 ) snake_case__ = self.scheduler.scale_model_input(_a , _a ) # predict the noise residual snake_case__ = self.unet(_a , _a ).sample # compute the previous noisy sample x_t -> x_t-1 snake_case__ = self.scheduler.step(_a , _a , _a , **_a ).prev_sample # decode the image latents with the VQVAE snake_case__ = self.vqvae.decode(_a ).sample snake_case__ = torch.clamp(_a , -1.0 , 1.0 ) snake_case__ = image / 2 + 0.5 snake_case__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": snake_case__ = self.numpy_to_pil(_a ) if not return_dict: return (image,) return ImagePipelineOutput(images=_a )
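# Editor's note: the pipeline above conditions the UNet by concatenating the noisy latents
# with the preprocessed low-resolution image along the channel axis, which is why it shapes
# the initial noise with `unet.config.in_channels // 2`. A shape check under assumed sizes
# (illustrative values, not from the original file):
import torch

latents = torch.randn(1, 3, 128, 128)  # noise latents (3 channels)
low_res_image = torch.randn(1, 3, 128, 128)  # preprocessed low-resolution image (3 channels)
latents_input = torch.cat([latents, low_res_image], dim=1)
assert latents_input.shape == (1, 6, 128, 128)  # matches in_channels == 6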
33
import tensorflow as tf from ...tf_utils import shape_list class UpperCamelCase__ ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self, snake_case__, snake_case__, snake_case__, snake_case__, snake_case__=1, snake_case__=False, **snake_case__ ) -> Optional[int]: """simple docstring""" super().__init__(**snake_case__ ) lowercase_ : Tuple = vocab_size lowercase_ : Union[str, Any] = d_embed lowercase_ : Optional[Any] = d_proj lowercase_ : Optional[Any] = cutoffs + [vocab_size] lowercase_ : Optional[int] = [0] + self.cutoffs lowercase_ : Union[str, Any] = div_val lowercase_ : Union[str, Any] = self.cutoffs[0] lowercase_ : List[str] = len(self.cutoffs ) - 1 lowercase_ : List[str] = self.shortlist_size + self.n_clusters lowercase_ : Tuple = keep_order lowercase_ : Dict = [] lowercase_ : int = [] def snake_case__ ( self, snake_case__ ) -> Optional[int]: """simple docstring""" if self.n_clusters > 0: lowercase_ : List[str] = self.add_weight( shape=(self.n_clusters, self.d_embed), initializer="""zeros""", trainable=snake_case__, name="""cluster_weight""" ) lowercase_ : List[Any] = self.add_weight( shape=(self.n_clusters,), initializer="""zeros""", trainable=snake_case__, name="""cluster_bias""" ) if self.div_val == 1: for i in range(len(self.cutoffs ) ): if self.d_proj != self.d_embed: lowercase_ : Any = self.add_weight( shape=(self.d_embed, self.d_proj), initializer="""zeros""", trainable=snake_case__, name=f"""out_projs_._{i}""", ) self.out_projs.append(snake_case__ ) else: self.out_projs.append(snake_case__ ) lowercase_ : List[str] = self.add_weight( shape=(self.vocab_size, self.d_embed), initializer="""zeros""", trainable=snake_case__, name=f"""out_layers_._{i}_._weight""", ) lowercase_ : List[str] = self.add_weight( shape=(self.vocab_size,), initializer="""zeros""", trainable=snake_case__, name=f"""out_layers_._{i}_._bias""", ) self.out_layers.append((weight, bias) ) else: for i in range(len(self.cutoffs ) ): lowercase_ , lowercase_ : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1] lowercase_ : Union[str, Any] = self.d_embed // (self.div_val**i) lowercase_ : int = self.add_weight( shape=(d_emb_i, self.d_proj), initializer="""zeros""", trainable=snake_case__, name=f"""out_projs_._{i}""" ) self.out_projs.append(snake_case__ ) lowercase_ : Any = self.add_weight( shape=(r_idx - l_idx, d_emb_i), initializer="""zeros""", trainable=snake_case__, name=f"""out_layers_._{i}_._weight""", ) lowercase_ : Tuple = self.add_weight( shape=(r_idx - l_idx,), initializer="""zeros""", trainable=snake_case__, name=f"""out_layers_._{i}_._bias""", ) self.out_layers.append((weight, bias) ) super().build(snake_case__ ) @staticmethod def snake_case__ ( snake_case__, snake_case__, snake_case__, snake_case__=None ) -> Optional[Any]: """simple docstring""" lowercase_ : Dict = x if proj is not None: lowercase_ : List[Any] = tf.einsum("""ibd,ed->ibe""", snake_case__, snake_case__ ) return tf.einsum("""ibd,nd->ibn""", snake_case__, snake_case__ ) + b @staticmethod def snake_case__ ( snake_case__, snake_case__ ) -> List[str]: """simple docstring""" lowercase_ : Optional[int] = shape_list(snake_case__ ) lowercase_ : Optional[Any] = tf.range(lp_size[0], dtype=target.dtype ) lowercase_ : str = tf.stack([r, target], 1 ) return tf.gather_nd(snake_case__, snake_case__ ) def snake_case__ ( self, snake_case__, snake_case__, snake_case__=True, snake_case__=False ) -> Dict: """simple docstring""" lowercase_ : Any = 0 if self.n_clusters == 0: lowercase_ : Union[str, Any] = self._logit(snake_case__, 
self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0] ) if target is not None: lowercase_ : str = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=snake_case__, logits=snake_case__ ) lowercase_ : Optional[int] = tf.nn.log_softmax(snake_case__, axis=-1 ) else: lowercase_ : int = shape_list(snake_case__ ) lowercase_ : List[Any] = [] lowercase_ : Tuple = tf.zeros(hidden_sizes[:2] ) for i in range(len(self.cutoffs ) ): lowercase_ , lowercase_ : Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1] if target is not None: lowercase_ : Tuple = (target >= l_idx) & (target < r_idx) lowercase_ : Union[str, Any] = tf.where(snake_case__ ) lowercase_ : Optional[Any] = tf.boolean_mask(snake_case__, snake_case__ ) - l_idx if self.div_val == 1: lowercase_ : Optional[Any] = self.out_layers[0][0][l_idx:r_idx] lowercase_ : Union[str, Any] = self.out_layers[0][1][l_idx:r_idx] else: lowercase_ : Dict = self.out_layers[i][0] lowercase_ : int = self.out_layers[i][1] if i == 0: lowercase_ : Optional[int] = tf.concat([cur_W, self.cluster_weight], 0 ) lowercase_ : Optional[Any] = tf.concat([cur_b, self.cluster_bias], 0 ) lowercase_ : List[str] = self._logit(snake_case__, snake_case__, snake_case__, self.out_projs[0] ) lowercase_ : List[str] = tf.nn.log_softmax(snake_case__ ) out.append(head_logprob[..., : self.cutoffs[0]] ) if target is not None: lowercase_ : int = tf.boolean_mask(snake_case__, snake_case__ ) lowercase_ : Optional[int] = self._gather_logprob(snake_case__, snake_case__ ) else: lowercase_ : List[str] = self._logit(snake_case__, snake_case__, snake_case__, self.out_projs[i] ) lowercase_ : Dict = tf.nn.log_softmax(snake_case__ ) lowercase_ : Optional[int] = self.cutoffs[0] + i - 1 # No probability for the head cluster lowercase_ : int = head_logprob[..., cluster_prob_idx, None] + tail_logprob out.append(snake_case__ ) if target is not None: lowercase_ : Optional[Any] = tf.boolean_mask(snake_case__, snake_case__ ) lowercase_ : Optional[int] = tf.boolean_mask(snake_case__, snake_case__ ) lowercase_ : Union[str, Any] = self._gather_logprob(snake_case__, snake_case__ ) cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1] if target is not None: loss += tf.scatter_nd(snake_case__, -cur_logprob, shape_list(snake_case__ ) ) lowercase_ : List[str] = tf.concat(snake_case__, axis=-1 ) if target is not None: if return_mean: lowercase_ : Tuple = tf.reduce_mean(snake_case__ ) # Add the training-time loss value to the layer using `self.add_loss()`. self.add_loss(snake_case__ ) # Log the loss as a metric (we could log arbitrary metrics, # including different metrics for training and inference. self.add_metric(snake_case__, name=self.name, aggregation="""mean""" if return_mean else """""" ) return out
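# Editor's note: the layer above is an adaptive softmax head: frequent tokens go in a head
# "shortlist" plus one extra logit per tail cluster, and tail cluster i uses a smaller
# embedding of size d_embed // (div_val ** i). A tiny illustration of how `cutoffs`
# partition the vocabulary (assumed toy numbers, not from the original file):
vocab_size = 10
cutoffs = [4, 7]  # head covers ids 0-3; tail clusters cover 4-6 and 7-9
cutoff_ends = [0] + cutoffs + [vocab_size]
bands = [(cutoff_ends[i], cutoff_ends[i + 1]) for i in range(len(cutoff_ends) - 1)]
assert bands == [(0, 4), (4, 7), (7, 10)]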
458
0
from typing import Optional, Tuple import jax import jax.numpy as jnp from flax import linen as nn from flax.core.frozen_dict import FrozenDict from transformers import CLIPConfig, FlaxPreTrainedModel from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase=1E-12): __lowerCAmelCase = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(lowerCamelCase, axis=1), a_min=lowerCamelCase)).T __lowerCAmelCase = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(lowerCamelCase, axis=1), a_min=lowerCamelCase)).T return jnp.matmul(lowerCamelCase, norm_emb_a.T) class a__ ( nn.Module ): """simple docstring""" __UpperCamelCase : CLIPConfig __UpperCamelCase : jnp.dtype = jnp.floataa def _snake_case (self ): __lowerCAmelCase = FlaxCLIPVisionModule(self.config.vision_config ) __lowerCAmelCase = nn.Dense(self.config.projection_dim , use_bias=__lowercase , dtype=self.dtype ) __lowerCAmelCase = self.param('''concept_embeds''' , jax.nn.initializers.ones , (17, self.config.projection_dim) ) __lowerCAmelCase = self.param( '''special_care_embeds''' , jax.nn.initializers.ones , (3, self.config.projection_dim) ) __lowerCAmelCase = self.param('''concept_embeds_weights''' , jax.nn.initializers.ones , (17,) ) __lowerCAmelCase = self.param('''special_care_embeds_weights''' , jax.nn.initializers.ones , (3,) ) def __call__(self , __lowercase ): __lowerCAmelCase = self.vision_model(__lowercase )[1] __lowerCAmelCase = self.visual_projection(__lowercase ) __lowerCAmelCase = jax_cosine_distance(__lowercase , self.special_care_embeds ) __lowerCAmelCase = jax_cosine_distance(__lowercase , self.concept_embeds ) # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign image inputs __lowerCAmelCase = 0.0 __lowerCAmelCase = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment __lowerCAmelCase = jnp.round(__lowercase , 3 ) __lowerCAmelCase = jnp.any(special_scores > 0 , axis=1 , keepdims=__lowercase ) # Use a lower threshold if an image has any special care concept __lowerCAmelCase = is_special_care * 0.0_1 __lowerCAmelCase = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment __lowerCAmelCase = jnp.round(__lowercase , 3 ) __lowerCAmelCase = jnp.any(concept_scores > 0 , axis=1 ) return has_nsfw_concepts class a__ ( __A ): """simple docstring""" __UpperCamelCase : List[str] = CLIPConfig __UpperCamelCase : Optional[Any] = 'clip_input' __UpperCamelCase : Any = FlaxStableDiffusionSafetyCheckerModule def __init__(self , __lowercase , __lowercase = None , __lowercase = 0 , __lowercase = jnp.floataa , __lowercase = True , **__lowercase , ): if input_shape is None: __lowerCAmelCase = (1, 2_24, 2_24, 3) __lowerCAmelCase = self.module_class(config=__lowercase , dtype=__lowercase , **__lowercase ) super().__init__(__lowercase , __lowercase , input_shape=__lowercase , seed=__lowercase , dtype=__lowercase , _do_init=_do_init ) def _snake_case (self , __lowercase , __lowercase , __lowercase = None ): # init input tensor __lowerCAmelCase = jax.random.normal(__lowercase , __lowercase ) __lowerCAmelCase , __lowerCAmelCase = jax.random.split(__lowercase ) __lowerCAmelCase = {'''params''': params_rng, '''dropout''': dropout_rng} __lowerCAmelCase = self.module.init(__lowercase , __lowercase )['''params'''] return random_params def __call__(self , __lowercase , __lowercase = None , ): __lowerCAmelCase = jnp.transpose(__lowercase , (0, 2, 3, 1) ) return self.module.apply( 
{'''params''': params or self.params} , jnp.array(__lowercase , dtype=jnp.floataa ) , rngs={} , )
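# Editor's note: `jax_cosine_distance` above L2-normalizes both embedding sets and then takes
# a matrix product, so despite the name it returns cosine *similarity* (1.0 for parallel
# vectors). A minimal re-derivation with toy inputs:
import jax.numpy as jnp

a = jnp.array([[3.0, 4.0]])
b = jnp.array([[6.0, 8.0]])  # parallel to `a`
a_norm = a / jnp.linalg.norm(a, axis=1, keepdims=True)
b_norm = b / jnp.linalg.norm(b, axis=1, keepdims=True)
assert jnp.allclose(jnp.matmul(a_norm, b_norm.T), 1.0)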
721
'''simple docstring''' import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : int = MgpstrTokenizer __UpperCamelCase : Optional[Any] = False __UpperCamelCase : Optional[int] = {} __UpperCamelCase : List[Any] = False def _snake_case (self ): super().setUp() # fmt: off __lowerCAmelCase = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z'''] # fmt: on __lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) ) __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__lowercase ) + '''\n''' ) def _snake_case (self , **__lowercase ): return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__lowercase ) def _snake_case (self , __lowercase ): __lowerCAmelCase = '''tester''' __lowerCAmelCase = '''tester''' return input_text, output_text @unittest.skip('''MGP-STR always lower cases letters.''' ) def _snake_case (self ): pass def _snake_case (self ): __lowerCAmelCase = self.get_tokenizers(do_lower_case=__lowercase ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): __lowerCAmelCase = '''[SPECIAL_TOKEN]''' tokenizer.add_special_tokens({'''cls_token''': special_token} ) __lowerCAmelCase = tokenizer.encode([special_token] , add_special_tokens=__lowercase ) self.assertEqual(len(__lowercase ) , 1 ) __lowerCAmelCase = tokenizer.decode(__lowercase , skip_special_tokens=__lowercase ) self.assertTrue(special_token not in decoded ) def _snake_case (self ): __lowerCAmelCase = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): __lowerCAmelCase , __lowerCAmelCase = self.get_input_output_texts(__lowercase ) __lowerCAmelCase = tokenizer.tokenize(__lowercase ) __lowerCAmelCase = tokenizer.convert_tokens_to_ids(__lowercase ) __lowerCAmelCase = tokenizer.encode(__lowercase , add_special_tokens=__lowercase ) self.assertListEqual(__lowercase , __lowercase ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(__lowercase ) self.assertNotEqual(len(__lowercase ) , 0 ) __lowerCAmelCase = tokenizer.decode(__lowercase ) self.assertIsInstance(__lowercase , __lowercase ) self.assertEqual(text_a.replace(''' ''' , '''''' ) , __lowercase ) @unittest.skip('''MGP-STR tokenizer only handles one sequence.''' ) def _snake_case (self ): pass @unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' ) def _snake_case (self ): pass
474
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging a : Optional[Any] = logging.get_logger(__name__) a : Optional[Any] = { "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json", "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json", "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json", "funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json", "funnel-transformer/intermediate": ( "https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json" ), "funnel-transformer/intermediate-base": ( "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json" ), "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json", "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json", "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json", "funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json", } class lowercase(lowerCamelCase__ ): __snake_case: int = 'funnel' __snake_case: Any = { 'hidden_size': 'd_model', 'num_attention_heads': 'n_head', } def __init__( self , __SCREAMING_SNAKE_CASE=3_0_5_2_2 , __SCREAMING_SNAKE_CASE=[4, 4, 4] , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=7_6_8 , __SCREAMING_SNAKE_CASE=1_2 , __SCREAMING_SNAKE_CASE=6_4 , __SCREAMING_SNAKE_CASE=3_0_7_2 , __SCREAMING_SNAKE_CASE="gelu_new" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=1e-9 , __SCREAMING_SNAKE_CASE="mean" , __SCREAMING_SNAKE_CASE="relative_shift" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , **__SCREAMING_SNAKE_CASE , ) -> Dict: """simple docstring""" a__ = vocab_size a__ = block_sizes a__ = [1] * len(lowercase_ ) if block_repeats is None else block_repeats assert len(lowercase_ ) == len( self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length." a__ = num_decoder_layers a__ = d_model a__ = n_head a__ = d_head a__ = d_inner a__ = hidden_act a__ = hidden_dropout a__ = attention_dropout a__ = activation_dropout a__ = initializer_range a__ = initializer_std a__ = layer_norm_eps assert pooling_type in [ "mean", "max", ], f'Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.' a__ = pooling_type assert attention_type in [ "relative_shift", "factorized", ], f'Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.' a__ = attention_type a__ = separate_cls a__ = truncate_seq a__ = pool_q_only super().__init__(**lowercase_ ) @property def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" return sum(self.block_sizes ) @num_hidden_layers.setter def lowercase__ ( self , __SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" raise NotImplementedError( 'This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.' 
) @property def num_blocks ( self ) -> Optional[Any]: """simple docstring""" return len(self.block_sizes ) @num_blocks.setter def num_blocks ( self , __SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" raise NotImplementedError('This model does not support the setting of `num_blocks`. Please set `block_sizes`.' )
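# Editor's note: in this config `num_hidden_layers` is derived, not stored: it is the sum of
# `block_sizes`, and `num_blocks` is their count. Illustrative check with assumed values:
block_sizes = [4, 4, 4]
assert sum(block_sizes) == 12  # what the num_hidden_layers property reports
assert len(block_sizes) == 3  # what the num_blocks property reports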
273
# Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version UpperCAmelCase__ : Any = get_logger(__name__) class __lowercase : __UpperCAmelCase = '''dummy_data''' __UpperCAmelCase = '''datasets''' __UpperCAmelCase = False def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = False , lowercase_ = True , lowercase_ = None , ) -> Any: __snake_case = 0 __snake_case = dataset_name __snake_case = cache_dir __snake_case = use_local_dummy_data __snake_case = config # download_callbacks take a single url as input __snake_case = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root __snake_case = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general __snake_case = str(lowercase_) # to be downloaded __snake_case = None __snake_case = None @property def _a ( self) -> int: if self._dummy_file is None: __snake_case = self.download_dummy_data() return self._dummy_file @property def _a ( self) -> Dict: if self.config is not None: # structure is dummy / config_name / version_name return os.path.join('dummy' , self.config.name , self.version_name) # structure is dummy / version_name return os.path.join('dummy' , self.version_name) @property def _a ( self) -> Dict: return os.path.join(self.dummy_data_folder , 'dummy_data.zip') def _a ( self) -> List[Any]: __snake_case = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) __snake_case = cached_path( lowercase_ , cache_dir=self.cache_dir , extract_compressed_file=lowercase_ , force_extract=lowercase_) return os.path.join(lowercase_ , self.dummy_file_name) @property def _a ( self) -> Union[str, Any]: return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file) @property def _a ( self) -> int: if self._bucket_url is None: __snake_case = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/')) return self._bucket_url @property def _a ( self) -> List[str]: # return full path if its a dir if os.path.isdir(self.dummy_file): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep , '/').split('/')[:-1]) def _a ( self , lowercase_ , *lowercase_) -> Any: if self.load_existing_dummy_data: # dummy data is downloaded and tested __snake_case = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned __snake_case = self.dummy_file_name # special case when data_url is a dict if isinstance(lowercase_ , lowercase_): return self.create_dummy_data_dict(lowercase_ , lowercase_) elif isinstance(lowercase_ , (list, tuple)): return self.create_dummy_data_list(lowercase_ , lowercase_) else: return self.create_dummy_data_single(lowercase_ , lowercase_) def _a ( self , lowercase_ , *lowercase_) -> Union[str, Any]: return self.download_and_extract(lowercase_) def _a ( self , lowercase_ , lowercase_) -> Tuple: return self.download_and_extract(lowercase_) def _a ( self , lowercase_ , *lowercase_ , **lowercase_) -> List[str]: return path def _a ( self) -> Dict: return {} def _a ( self , lowercase_ , lowercase_) -> List[Any]: __snake_case = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(lowercase_ , lowercase_): for single_url in single_urls: download_callback(lowercase_) else: __snake_case = single_urls download_callback(lowercase_) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(lowercase_ , lowercase_): __snake_case = [os.path.join(lowercase_ , urllib.parse.quote_plus(Path(lowercase_).name)) for x in single_urls] else: __snake_case = single_urls __snake_case = os.path.join(lowercase_ , urllib.parse.quote_plus(Path(lowercase_).name)) __snake_case = value # make sure that values are unique if all(isinstance(lowercase_ , lowercase_) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len( dummy_data_dict.values()): # append key to value to make its name unique __snake_case = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def _a ( self , lowercase_ , lowercase_) -> Dict: __snake_case = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one __snake_case = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , lowercase_)) for url in data_url) __snake_case = all( url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed') for url in data_url) if data_url and (is_tf_records or is_pubmed_records): __snake_case = [data_url[0]] * len(lowercase_) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(lowercase_) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus __snake_case = os.path.join(lowercase_ , urllib.parse.quote_plus(single_url.split('/')[-1])) dummy_data_list.append(lowercase_) return dummy_data_list def _a ( self , lowercase_ , lowercase_) -> Optional[Any]: for download_callback in self.download_callbacks: download_callback(lowercase_) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus __snake_case = os.path.join(lowercase_ , urllib.parse.quote_plus(data_url.split('/')[-1])) if os.path.exists(lowercase_) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. 
# For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def _a ( self) -> List[Any]: pass def _a ( self) -> str: pass def _a ( self , lowercase_) -> List[Any]: def _iter_archive_members(lowercase_): # this preserves the order of the members inside the ZIP archive __snake_case = Path(self.dummy_file).parent __snake_case = path.relative_to(lowercase_) with ZipFile(self.local_path_to_dummy_data) as zip_file: __snake_case = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix()): yield dummy_parent_path.joinpath(lowercase_) __snake_case = Path(lowercase_) __snake_case = _iter_archive_members(lowercase_) if self.use_local_dummy_data else path.rglob('*') for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith(('.', '__')): yield file_path.relative_to(lowercase_).as_posix(), file_path.open('rb') def _a ( self , lowercase_) -> int: if not isinstance(lowercase_ , lowercase_): __snake_case = [paths] for path in paths: if os.path.isfile(lowercase_): if os.path.basename(lowercase_).startswith(('.', '__')): return yield path else: for dirpath, dirnames, filenames in os.walk(lowercase_): if os.path.basename(lowercase_).startswith(('.', '__')): continue dirnames.sort() for filename in sorted(lowercase_): if filename.startswith(('.', '__')): continue yield os.path.join(lowercase_ , lowercase_)
313
0
from collections import deque


def tarjan(g):
    """
    Tarjan's algorithm for finding strongly connected components in a directed graph.
    Performs a single depth-first search, so it runs in O(V + E).
    """
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
            elif on_stack[w]:
                lowlink_of[v] = lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)
    return components


def create_graph(n, edges):
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
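# Editor's note: components are emitted in reverse topological order of the condensation
# graph. Because `strong_connect` recurses once per vertex, very deep graphs (e.g. a long
# path) can exceed CPython's default recursion limit; a common workaround, assuming a
# vertex count like `n_vertices` in the test above:
#
#   import sys
#   sys.setrecursionlimit(max(1_000, 2 * n_vertices + 10))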
707
from ..utils import is_flax_available, is_torch_available

if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_ad import DualTransformeraDModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .ta_film_transformer import TaFilmDecoder
    from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_ad_condition_flax import FlaxUNetaDConditionModel
    from .vae_flax import FlaxAutoencoderKL
515
0
'''simple docstring'''

MOD_ADLER = 65_521


def adler32(plain_text: str) -> int:
    """
    Function implements the Adler-32 checksum (as used in zlib).
    """
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
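# Editor's note: a quick sanity check against the standard Adler-32 worked example; zlib's
# built-in (which operates on bytes) should agree.
import zlib

assert adler32("Wikipedia") == 300_286_872  # 0x11E60398
assert zlib.adler32(b"Wikipedia") == 300_286_872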
48
from typing import List

from .keymap import KEYMAP, get_character


def mark(key):
    """Mark the function with a key code so it can be dispatched by the handler."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys):
    """Mark the function with several key codes so it can be dispatched by the handler."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    """Metaclass that collects marked methods into a per-class `key_handler` table."""

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Find and call the handler registered for the pressed key, otherwise return None."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Apply the KeyHandler metaclass to an existing class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
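# Editor's note: a minimal usage sketch for the helpers above (names as restored here);
# `Menu` and `move_down` are hypothetical examples, not part of the original snippet.
#
#   @register
#   class Menu:
#       @mark("j")
#       def move_down(cls):
#           ...
#
#   Menu().handle_input() then dispatches to `move_down` when "j" is pressed, since the
#   instance is passed through as the `cls` argument of the stored handler.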
10
0
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
354
import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( 'files' , [ ['full:README.md', 'dataset_infos.json'], ['empty:README.md', 'dataset_infos.json'], ['dataset_infos.json'], ['full:README.md'], ] , ) def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> List[str]: _lowercase : Any = tmp_path_factory.mktemp('dset_infos_dir' ) if "full:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('---\ndataset_info:\n dataset_size: 42\n---' ) if "empty:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('' ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f: f.write('{"default": {"dataset_size": 42}}' ) _lowercase : Any = DatasetInfosDict.from_directory(lowerCamelCase_ ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( 'dataset_info' , [ DatasetInfo(), DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ), ] , ) def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]: _lowercase : Union[str, Any] = str(lowerCamelCase_ ) dataset_info.write_to_directory(lowerCamelCase_ ) _lowercase : List[str] = DatasetInfo.from_directory(lowerCamelCase_ ) assert dataset_info == reloaded assert os.path.exists(os.path.join(lowerCamelCase_ , 'dataset_info.json' ) ) def UpperCamelCase_( ) -> int: _lowercase : Tuple = DatasetInfo( description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , ) _lowercase : Optional[int] = dataset_info._to_yaml_dict() assert sorted(lowerCamelCase_ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) _lowercase : str = yaml.safe_dump(lowerCamelCase_ ) _lowercase : str = yaml.safe_load(lowerCamelCase_ ) assert dataset_info_yaml_dict == reloaded def UpperCamelCase_( ) -> int: _lowercase : Tuple = DatasetInfo() _lowercase : Tuple = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( 'dataset_infos_dict' , [ DatasetInfosDict(), DatasetInfosDict({'default': DatasetInfo()} ), DatasetInfosDict({'my_config_name': DatasetInfo()} ), DatasetInfosDict( { 'default': DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ) } ), DatasetInfosDict( { 'v1': DatasetInfo(dataset_size=42 ), 'v2': DatasetInfo(dataset_size=1337 ), } ), ] , ) def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Dict: _lowercase : Tuple = str(lowerCamelCase_ ) dataset_infos_dict.write_to_directory(lowerCamelCase_ ) _lowercase : Tuple = DatasetInfosDict.from_directory(lowerCamelCase_ ) # the config_name of the dataset_infos_dict take over 
the attribute for config_name, dataset_info in dataset_infos_dict.items(): _lowercase : Any = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml _lowercase : str = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(lowerCamelCase_ , 'README.md' ) )
354
1
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFXLMRobertaModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
66
'''simple docstring'''
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split text into sentences with nltk and rejoin them with newlines."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char (the original discarded this result)
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
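# Editor's note: quick usage sketch (requires the "punkt" data downloaded above); the input
# string is an illustrative assumption:
#
#   add_newline_to_end_of_each_sentence("First sentence. <n>Second sentence.")
#   # -> "First sentence.\nSecond sentence."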
186
0
"""simple docstring""" import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) a_ = """\ Text data. Second line of data.""" a_ = """file""" @pytest.fixture(scope='session' ) def UpperCAmelCase_ ( __a : str ): '''simple docstring''' _lowerCamelCase : List[Any] = tmp_path_factory.mktemp('data' ) / (FILE_PATH + '.zstd') _lowerCamelCase : Optional[Any] = bytes(__a , 'utf-8' ) with zstd.open(__a , 'wb' ) as f: f.write(__a ) return path @pytest.fixture def UpperCAmelCase_ ( __a : str ): '''simple docstring''' with open(os.path.join(tmpfs.local_root_dir , __a ) , 'w' ) as f: f.write(__a ) return FILE_PATH @pytest.mark.parametrize('compression_format' , ['gzip', 'xz', 'zstd'] ) def UpperCAmelCase_ ( __a : List[Any] , __a : Optional[int] , __a : List[str] , __a : Union[str, Any] , __a : Tuple , __a : Dict ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_path} _lowerCamelCase : List[str] = input_paths[compression_format] _lowerCamelCase : int = tmp_path / 'cache' _lowerCamelCase : List[Any] = DownloadConfig(cache_dir=__a , extract_compressed_file=__a ) _lowerCamelCase : Union[str, Any] = cached_path(__a , download_config=__a ) with open(__a ) as f: _lowerCamelCase : str = f.read() with open(__a ) as f: _lowerCamelCase : Union[str, Any] = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize('default_extracted' , [True, False] ) @pytest.mark.parametrize('default_cache_dir' , [True, False] ) def UpperCAmelCase_ ( __a : List[Any] , __a : int , __a : str , __a : Tuple , __a : Union[str, Any] ): '''simple docstring''' _lowerCamelCase : Optional[int] = 'custom_cache' _lowerCamelCase : Optional[Any] = 'custom_extracted_dir' _lowerCamelCase : Any = tmp_path / 'custom_extracted_path' if default_extracted: _lowerCamelCase : Union[str, Any] = ('downloads' if default_cache_dir else custom_cache_dir, 'extracted') else: monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR' , __a ) monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(__a ) ) _lowerCamelCase : Union[str, Any] = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) _lowerCamelCase : Tuple = xz_file _lowerCamelCase : str = ( DownloadConfig(extract_compressed_file=__a ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=__a ) ) _lowerCamelCase : Any = cached_path(__a , download_config=__a ) assert Path(__a ).parent.parts[-2:] == expected def UpperCAmelCase_ ( __a : Any ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = str(Path(__a ).resolve() ) assert cached_path(__a ) == text_file # relative path _lowerCamelCase : List[str] = str(Path(__a ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(__a ) == text_file def UpperCAmelCase_ ( __a : Union[str, Any] ): '''simple docstring''' _lowerCamelCase : str = str(tmp_path.resolve() / '__missing_file__.txt' ) with pytest.raises(__a ): cached_path(__a ) # relative path _lowerCamelCase : Union[str, Any] = './__missing_file__.txt' with pytest.raises(__a ): cached_path(__a ) def UpperCAmelCase_ ( __a : Tuple ): '''simple docstring''' _lowerCamelCase : Tuple = get_from_cache(f"tmp://{tmpfs_file}" ) with open(__a ) 
as f: _lowerCamelCase : Any = f.read() assert output_file_content == FILE_CONTENT @patch('datasets.config.HF_DATASETS_OFFLINE' , __a ) def UpperCAmelCase_ ( ): '''simple docstring''' with pytest.raises(__a ): cached_path('https://huggingface.co' ) @patch('datasets.config.HF_DATASETS_OFFLINE' , __a ) def UpperCAmelCase_ ( __a : str ): '''simple docstring''' _lowerCamelCase : List[Any] = tmp_path_factory.mktemp('data' ) / 'file.html' with pytest.raises(__a ): http_get('https://huggingface.co' , temp_file=__a ) with pytest.raises(__a ): http_head('https://huggingface.co' ) @patch('datasets.config.HF_DATASETS_OFFLINE' , __a ) def UpperCAmelCase_ ( __a : int ): '''simple docstring''' _lowerCamelCase : List[Any] = tmp_path_factory.mktemp('data' ) / 'file.html' with pytest.raises(__a ): ftp_get('ftp://huggingface.co' , temp_file=__a ) with pytest.raises(__a ): ftp_head('ftp://huggingface.co' ) @patch('datasets.config.HF_DATASETS_OFFLINE' , __a ) def UpperCAmelCase_ ( __a : Dict ): '''simple docstring''' _lowerCamelCase : Dict = tmp_path_factory.mktemp('data' ) / 'file.html' with pytest.raises(__a ): fsspec_get('s3://huggingface.co' , temp_file=__a ) with pytest.raises(__a ): fsspec_head('s3://huggingface.co' )
349
"""simple docstring""" import os from datetime import datetime as dt from github import Github a_ = [ """good first issue""", """feature request""", """wip""", ] def UpperCAmelCase_ ( ): '''simple docstring''' _lowerCamelCase : str = Github(os.environ['GITHUB_TOKEN'] ) _lowerCamelCase : Optional[Any] = g.get_repo('huggingface/accelerate' ) _lowerCamelCase : Optional[int] = repo.get_issues(state='open' ) for issue in open_issues: _lowerCamelCase : List[str] = sorted([comment for comment in issue.get_comments()] , key=lambda __a : i.created_at , reverse=__a ) _lowerCamelCase : Tuple = comments[0] if len(__a ) > 0 else None _lowerCamelCase : Optional[Any] = dt.utcnow() _lowerCamelCase : int = (current_time - issue.updated_at).days _lowerCamelCase : Tuple = (current_time - issue.created_at).days if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and days_since_updated > 7 and days_since_creation >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Close issue since it has been 7 days of inactivity since bot mention. issue.edit(state='closed' ) elif ( days_since_updated > 23 and days_since_creation >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Add stale comment issue.create_comment( 'This issue has been automatically marked as stale because it has not had ' 'recent activity. If you think this still needs to be addressed ' 'please comment on this thread.\n\nPlease note that issues that do not follow the ' '[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) ' 'are likely to be ignored.' ) if __name__ == "__main__": main()
349
1
import os import tempfile import unittest from transformers import NezhaConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, ) from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase: def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, Any]=1_3 , SCREAMING_SNAKE_CASE : str=7 , SCREAMING_SNAKE_CASE : Optional[int]=True , SCREAMING_SNAKE_CASE : int=True , SCREAMING_SNAKE_CASE : Tuple=True , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : str=9_9 , SCREAMING_SNAKE_CASE : List[str]=3_2 , SCREAMING_SNAKE_CASE : List[Any]=5 , SCREAMING_SNAKE_CASE : int=4 , SCREAMING_SNAKE_CASE : Union[str, Any]=3_7 , SCREAMING_SNAKE_CASE : List[Any]="gelu" , SCREAMING_SNAKE_CASE : Optional[int]=0.1 , SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE : str=1_2_8 , SCREAMING_SNAKE_CASE : str=3_2 , SCREAMING_SNAKE_CASE : Optional[Any]=1_6 , SCREAMING_SNAKE_CASE : List[str]=2 , SCREAMING_SNAKE_CASE : Tuple=0.02 , SCREAMING_SNAKE_CASE : int=3 , SCREAMING_SNAKE_CASE : int=4 , SCREAMING_SNAKE_CASE : Tuple=None , ) -> List[Any]: '''simple docstring''' __snake_case = parent __snake_case = batch_size __snake_case = seq_length __snake_case = is_training __snake_case = use_input_mask __snake_case = use_token_type_ids __snake_case = use_labels __snake_case = vocab_size __snake_case = hidden_size __snake_case = num_hidden_layers __snake_case = num_attention_heads __snake_case = intermediate_size __snake_case = hidden_act __snake_case = hidden_dropout_prob __snake_case = attention_probs_dropout_prob __snake_case = max_position_embeddings __snake_case = type_vocab_size __snake_case = type_sequence_label_size __snake_case = initializer_range __snake_case = num_labels __snake_case = num_choices __snake_case = scope def SCREAMING_SNAKE_CASE_ ( self : int ) -> Any: '''simple docstring''' __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case = None if self.use_input_mask: __snake_case = random_attention_mask([self.batch_size, self.seq_length] ) __snake_case = None if self.use_token_type_ids: __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __snake_case = None __snake_case = None __snake_case = None if self.use_labels: __snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case = ids_tensor([self.batch_size] , self.num_choices ) __snake_case = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> List[str]: '''simple docstring''' return NezhaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , 
num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) = self.prepare_config_and_inputs() __snake_case = True __snake_case = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __snake_case = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Tuple ) -> Any: '''simple docstring''' __snake_case = NezhaModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() __snake_case = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) __snake_case = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) __snake_case = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Any , ) -> Union[str, Any]: '''simple docstring''' __snake_case = True __snake_case = NezhaModel(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() __snake_case = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , ) __snake_case = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , ) __snake_case = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple: '''simple docstring''' __snake_case = NezhaForMaskedLM(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() __snake_case = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , 
token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Dict ) -> int: '''simple docstring''' __snake_case = NezhaForNextSentencePrediction(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() __snake_case = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : int ) -> Tuple: '''simple docstring''' __snake_case = NezhaForPreTraining(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() __snake_case = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , next_sentence_label=__lowerCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[str] ) -> List[str]: '''simple docstring''' __snake_case = NezhaForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() __snake_case = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : str ) -> Any: '''simple docstring''' __snake_case = self.num_labels __snake_case = NezhaForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() __snake_case = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[Any]: '''simple docstring''' __snake_case = self.num_labels __snake_case = NezhaForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() __snake_case = 
model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
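# Hedged sketch (not part of the test suite above): the TorchScript trace/save/load
# round trip that the GPU test exercises, reduced to a tiny self-contained module.
# `TinyModel` and the file name are illustrative assumptions, not Nezha-specific code.
import os
import tempfile

import torch


class TinyModel(torch.nn.Module):
    def forward(self, x):
        # doubling keeps the traced graph trivial to verify
        return x * 2


traced = torch.jit.trace(TinyModel(), (torch.ones(1, 3),))
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "tiny.pt")
    torch.jit.save(traced, path)  # persist the traced graph ...
    loaded = torch.jit.load(path, map_location="cpu")  # ... and reload it on a chosen device
    print(loaded(torch.ones(1, 3)))  # tensor([[2., 2., 2.]])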
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
    MaskedBertForMultipleChoice,
    MaskedBertForQuestionAnswering,
    MaskedBertForSequenceClassification,
    MaskedBertForTokenClassification,
    MaskedBertModel,
)
from .modules import *
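# Hedged usage sketch for the exports above, kept as comments because the enclosing
# package name is an assumption; the classes mirror their plain-BERT counterparts:
#
#   from .configuration_bert_masked import MaskedBertConfig
#   from .modeling_bert_masked import MaskedBertModel
#
#   config = MaskedBertConfig()      # default masked-BERT configuration
#   model = MaskedBertModel(config)  # randomly initialised model with maskable weights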
import copy
import inspect
import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
        VideoMAEForPreTraining,
        VideoMAEForVideoClassification,
        VideoMAEModel,
    )
    from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from transformers import VideoMAEImageProcessor


class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame
        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            tubelet_size=self.tubelet_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()

        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)

        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="VideoMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)


@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_for_pretraining(self):
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(torch_device)

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device
        )
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to(
            torch_device
        )

        with torch.no_grad():
            outputs = model(**inputs)

        expected_loss = torch.tensor([0.6469], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
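# Hedged sketch: how the shared `bool_masked_pos` used throughout these tests is
# built, in isolation. The sizes below are illustrative stand-ins for values that
# normally come from the config (seq_length = num_frames/tubelet_size * patches per frame).
import torch

seq_length, num_masks, batch_size = 16, 12, 2  # illustrative; 12/16 = 0.75 mask ratio
mask = torch.cat([torch.ones(num_masks), torch.zeros(seq_length - num_masks)])
bool_masked_pos = mask.expand(batch_size, -1).bool()  # the same mask, repeated per video
print(bool_masked_pos.shape)  # torch.Size([2, 16])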
import unittest

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible


class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
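# Hedged usage sketch: outside the test harness, the helper is called with a flat
# list of repo file paths, exactly as in the assertions above (the list below is
# illustrative):
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible

filenames = [
    "unet/diffusion_pytorch_model.bin",
    "unet/diffusion_pytorch_model.safetensors",
]
print(is_safetensors_compatible(filenames))  # True: the .bin has a safetensors twin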
"""simple docstring""" from pathlib import Path import fire def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Any: """simple docstring""" _UpperCamelCase : List[Any] = Path(lowercase_ ) _UpperCamelCase : int = Path(lowercase_ ) dest_dir.mkdir(exist_ok=lowercase_ ) for path in src_dir.iterdir(): _UpperCamelCase : Tuple = [x.rstrip() for x in list(path.open().readlines() )][:n] _UpperCamelCase : Union[str, Any] = dest_dir.joinpath(path.name ) print(lowercase_ ) dest_path.open("w" ).write("\n".join(lowercase_ ) ) if __name__ == "__main__": fire.Fire(minify)
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging lowerCamelCase__ = logging.get_logger(__name__) if is_vision_available(): import PIL class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :Optional[int] = ["pixel_values"] def __init__( self : Tuple , __a : bool = True , __a : Dict[str, int] = None , __a : PILImageResampling = PILImageResampling.BICUBIC , __a : bool = True , __a : Dict[str, int] = None , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : bool = True , **__a : List[Any] , ) -> None: super().__init__(**__a ) _UpperCamelCase : Optional[int] = size if size is not None else {"shortest_edge": 224} _UpperCamelCase : Optional[int] = get_size_dict(__a , default_to_square=__a ) _UpperCamelCase : Union[str, Any] = crop_size if crop_size is not None else {"height": 224, "width": 224} _UpperCamelCase : Tuple = get_size_dict(__a , default_to_square=__a , param_name="crop_size" ) _UpperCamelCase : Optional[Any] = do_resize _UpperCamelCase : Dict = size _UpperCamelCase : Any = resample _UpperCamelCase : Tuple = do_center_crop _UpperCamelCase : str = crop_size _UpperCamelCase : Any = do_rescale _UpperCamelCase : Dict = rescale_factor _UpperCamelCase : int = do_normalize _UpperCamelCase : Dict = image_mean if image_mean is not None else OPENAI_CLIP_MEAN _UpperCamelCase : Tuple = image_std if image_std is not None else OPENAI_CLIP_STD _UpperCamelCase : List[Any] = do_convert_rgb def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BICUBIC , __a : Optional[Union[str, ChannelDimension]] = None , **__a : str , ) -> np.ndarray: _UpperCamelCase : Any = get_size_dict(__a , default_to_square=__a ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) _UpperCamelCase : Dict = get_resize_output_image_size(__a , size=size["shortest_edge"] , default_to_square=__a ) return resize(__a , size=__a , resample=__a , data_format=__a , **__a ) def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Tuple , ) -> np.ndarray: _UpperCamelCase : List[str] = get_size_dict(__a ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(__a , size=(size["height"], size["width"]) , data_format=__a , **__a ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : np.ndarray , __a : Union[int, float] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[Any] , ) -> List[str]: return rescale(__a , scale=__a , data_format=__a , **__a ) def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Tuple , ) -> np.ndarray: return normalize(__a , mean=__a , std=__a , data_format=__a , **__a ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : int = None , __a : bool = None , __a : float = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : bool = None , __a : Optional[Union[str, TensorType]] = None , __a : Optional[ChannelDimension] = ChannelDimension.FIRST , **__a : Union[str, Any] , ) -> PIL.Image.Image: _UpperCamelCase : Tuple = do_resize if do_resize is not None else self.do_resize _UpperCamelCase : int = size if size is not None else self.size _UpperCamelCase : int = get_size_dict(__a , param_name="size" , default_to_square=__a ) _UpperCamelCase : List[Any] = resample if resample is not None else self.resample _UpperCamelCase : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop _UpperCamelCase : Union[str, Any] = crop_size if crop_size is not None else self.crop_size _UpperCamelCase : List[Any] = get_size_dict(__a , param_name="crop_size" , default_to_square=__a ) _UpperCamelCase : Dict = do_rescale if do_rescale is not None else self.do_rescale _UpperCamelCase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor _UpperCamelCase : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize _UpperCamelCase : List[Any] = image_mean if image_mean is not None else self.image_mean _UpperCamelCase : List[Any] = image_std if image_std is not None else self.image_std _UpperCamelCase : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb _UpperCamelCase : Any = make_list_of_images(__a ) if not valid_images(__a ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # PIL RGBA images are converted to RGB if do_convert_rgb: _UpperCamelCase : int = [convert_to_rgb(__a ) for image in images] # All transformations expect numpy arrays. 
_UpperCamelCase : List[Any] = [to_numpy_array(__a ) for image in images] if do_resize: _UpperCamelCase : Union[str, Any] = [self.resize(image=__a , size=__a , resample=__a ) for image in images] if do_center_crop: _UpperCamelCase : Optional[Any] = [self.center_crop(image=__a , size=__a ) for image in images] if do_rescale: _UpperCamelCase : Dict = [self.rescale(image=__a , scale=__a ) for image in images] if do_normalize: _UpperCamelCase : List[Any] = [self.normalize(image=__a , mean=__a , std=__a ) for image in images] _UpperCamelCase : List[Any] = [to_channel_dimension_format(__a , __a ) for image in images] _UpperCamelCase : str = {"pixel_values": images} return BatchFeature(data=__a , tensor_type=__a )
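# Hedged usage sketch: running the processor end to end on a dummy image, assuming
# the class above is importable from its package. The random image is an illustrative
# assumption; any PIL image goes through the same resize/crop/rescale/normalize path.
import numpy as np
from PIL import Image

processor = CLIPImageProcessor()  # defaults: shortest edge 224, center crop 224x224
image = Image.fromarray(np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8))
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)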
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
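# Worked example of the expected behaviour (derived from the test data above):
# merging the two eight-element lists prints one sorted sixteen-element list:
#
#   -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10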
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}

# Exponent of the factor (meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
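# Hedged usage sketch; both results follow directly from the exponent table above:
# print(length_conversion(4, "meter", "kilometer"))      # 0.004      (4 * 10**(0-3))
# print(length_conversion(1, "gigametre", "kilometer"))  # 1000000.0  (10**(9-3))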