code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
'''simple docstring''' import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers __a = "python tqdm regex requests packaging filelock numpy tokenizers".split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append("dataclasses") if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append("importlib_metadata") for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py") def __snake_case( _lowerCAmelCase , _lowerCAmelCase=None ) -> Dict: require_version(deps[pkg] , _lowerCAmelCase )
35
'''simple docstring''' import warnings from ...utils import logging from .image_processing_beit import BeitImageProcessor __a = logging.get_logger(__name__) class UpperCAmelCase_ ( _a ): """simple docstring""" def __init__( self : List[str] , *snake_case_ : str , **snake_case_ : List[str] ): warnings.warn( """The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use BeitImageProcessor instead.""" , snake_case_ , ) super().__init__(*snake_case_ , **snake_case_ )
35
1
'''simple docstring''' from collections.abc import Sequence def __snake_case( _lowerCAmelCase = None ) -> int: if nums is None or not nums: raise ValueError("""Input sequence should not be empty""" ) snake_case__ : Union[str, Any] = nums[0] for i in range(1 , len(_lowerCAmelCase ) ): snake_case__ : List[Any] = nums[i] snake_case__ : List[str] = max(_lowerCAmelCase , ans + num , _lowerCAmelCase ) return ans if __name__ == "__main__": import doctest doctest.testmod() # Try on a sample input from the user __a = int(input("Enter number of elements : ").strip()) __a = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n] print(max_subsequence_sum(array))
35
'''simple docstring''' import logging from dataclasses import dataclass, field from pathlib import Path from typing import Optional, Union from .generation.configuration_utils import GenerationConfig from .training_args import TrainingArguments from .utils import add_start_docstrings __a = logging.getLogger(__name__) @dataclass @add_start_docstrings(TrainingArguments.__doc__ ) class UpperCAmelCase_ ( _a ): """simple docstring""" lowercase = field(default=_a , metadata={"help": "Whether to use SortishSampler or not."} ) lowercase = field( default=_a , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} ) lowercase = field( default=_a , metadata={ "help": ( "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default " "to the `max_length` value of the model configuration." ) } , ) lowercase = field( default=_a , metadata={ "help": ( "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default " "to the `num_beams` value of the model configuration." ) } , ) lowercase = field( default=_a , metadata={ "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction." } , ) def lowerCamelCase ( self : List[str] ): snake_case__ : int = super().to_dict() for k, v in d.items(): if isinstance(snake_case_ , snake_case_ ): snake_case__ : Optional[int] = v.to_dict() return d
35
1
'''simple docstring''' import inspect import os import torch from transformers import AutoModel from transformers.testing_utils import mockenv_context from transformers.trainer_utils import set_seed import accelerate from accelerate.accelerator import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils.testing import ( AccelerateTestCase, TempDirTestCase, execute_subprocess_async, require_cuda, require_fsdp, require_multi_gpu, slow, ) from accelerate.utils.constants import ( FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE, ) from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin from accelerate.utils.other import patch_environment set_seed(42) __a = "bert-base-cased" __a = "fp16" __a = "bf16" __a = [FPaa, BFaa] @require_fsdp @require_cuda class UpperCAmelCase_ ( _a ): """simple docstring""" def lowerCamelCase ( self : Optional[int] ): super().setUp() snake_case__ : Optional[Any] = dict( ACCELERATE_USE_FSDP="""true""" , MASTER_ADDR="""localhost""" , MASTER_PORT="""10999""" , RANK="""0""" , LOCAL_RANK="""0""" , WORLD_SIZE="""1""" , ) def lowerCamelCase ( self : Union[str, Any] ): from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy for i, strategy in enumerate(snake_case_ ): snake_case__ : Union[str, Any] = self.dist_env.copy() snake_case__ : Union[str, Any] = f"{i + 1}" snake_case__ : str = strategy with mockenv_context(**snake_case_ ): snake_case__ : List[str] = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) ) def lowerCamelCase ( self : List[str] ): from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch for i, prefetch_policy in enumerate(snake_case_ ): snake_case__ : Optional[int] = self.dist_env.copy() snake_case__ : List[str] = prefetch_policy with mockenv_context(**snake_case_ ): snake_case__ : Optional[Any] = FullyShardedDataParallelPlugin() if 
prefetch_policy == "NO_PREFETCH": self.assertIsNone(fsdp_plugin.backward_prefetch ) else: self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) ) def lowerCamelCase ( self : Dict ): from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType for i, state_dict_type in enumerate(snake_case_ ): snake_case__ : Dict = self.dist_env.copy() snake_case__ : Optional[Any] = state_dict_type with mockenv_context(**snake_case_ ): snake_case__ : Dict = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) ) if state_dict_type == "FULL_STATE_DICT": self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu ) self.assertTrue(fsdp_plugin.state_dict_config.ranka_only ) def lowerCamelCase ( self : Tuple ): snake_case__ : Tuple = AutoModel.from_pretrained(snake_case_ ) for policy in FSDP_AUTO_WRAP_POLICY: snake_case__ : Union[str, Any] = self.dist_env.copy() snake_case__ : Optional[Any] = policy if policy == "TRANSFORMER_BASED_WRAP": snake_case__ : Any = """BertLayer""" elif policy == "SIZE_BASED_WRAP": snake_case__ : Optional[int] = """2000""" with mockenv_context(**snake_case_ ): snake_case__ : str = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(snake_case_ ) if policy == "NO_WRAP": self.assertIsNone(fsdp_plugin.auto_wrap_policy ) else: self.assertIsNotNone(fsdp_plugin.auto_wrap_policy ) snake_case__ : Tuple = self.dist_env.copy() snake_case__ : str = """TRANSFORMER_BASED_WRAP""" snake_case__ : Union[str, Any] = """T5Layer""" with mockenv_context(**snake_case_ ): snake_case__ : Union[str, Any] = FullyShardedDataParallelPlugin() with self.assertRaises(snake_case_ ) as cm: fsdp_plugin.set_auto_wrap_policy(snake_case_ ) self.assertTrue("""Could not find the transformer layer class to wrap in the model.""" in str(cm.exception ) ) snake_case__ : List[str] = self.dist_env.copy() snake_case__ : List[Any] = """SIZE_BASED_WRAP""" snake_case__ : List[Any] = """0""" with 
mockenv_context(**snake_case_ ): snake_case__ : Dict = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(snake_case_ ) self.assertIsNone(fsdp_plugin.auto_wrap_policy ) def lowerCamelCase ( self : Optional[int] ): from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler for mp_dtype in dtypes: snake_case__ : int = self.dist_env.copy() snake_case__ : List[str] = mp_dtype with mockenv_context(**snake_case_ ): snake_case__ : Optional[Any] = Accelerator() if mp_dtype == "fp16": snake_case__ : int = torch.floataa elif mp_dtype == "bf16": snake_case__ : Tuple = torch.bfloataa snake_case__ : List[Any] = MixedPrecision(param_dtype=snake_case_ , reduce_dtype=snake_case_ , buffer_dtype=snake_case_ ) self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , snake_case_ ) if mp_dtype == FPaa: self.assertTrue(isinstance(accelerator.scaler , snake_case_ ) ) elif mp_dtype == BFaa: self.assertIsNone(accelerator.scaler ) AcceleratorState._reset_state(snake_case_ ) def lowerCamelCase ( self : Tuple ): from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload for flag in [True, False]: snake_case__ : Optional[Any] = self.dist_env.copy() snake_case__ : Optional[int] = str(snake_case_ ).lower() with mockenv_context(**snake_case_ ): snake_case__ : int = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=snake_case_ ) ) @require_fsdp @require_multi_gpu @slow class UpperCAmelCase_ ( _a ): """simple docstring""" def lowerCamelCase ( self : List[str] ): super().setUp() snake_case__ : Dict = 0.82 snake_case__ : List[str] = [ """fsdp_shard_grad_op_transformer_based_wrap""", """fsdp_full_shard_transformer_based_wrap""", ] snake_case__ : Optional[int] = { """multi_gpu_fp16""": 3_200, """fsdp_shard_grad_op_transformer_based_wrap_fp16""": 2_000, """fsdp_full_shard_transformer_based_wrap_fp16""": 1_900, # 
Disabling below test as it overwhelms the RAM memory usage # on CI self-hosted runner leading to tests getting killed. # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang } snake_case__ : List[str] = 160 snake_case__ : Any = 160 snake_case__ : Any = inspect.getfile(accelerate.test_utils ) snake_case__ : Dict = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps"""] ) def lowerCamelCase ( self : Any ): snake_case__ : Tuple = os.path.join(self.test_scripts_folder , """test_performance.py""" ) snake_case__ : Any = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""] for config in self.performance_configs: snake_case__ : Tuple = cmd.copy() for i, strategy in enumerate(snake_case_ ): if strategy.lower() in config: cmd_config.append(f"--fsdp_sharding_strategy={i+1}" ) break if "fp32" in config: cmd_config.append("""--mixed_precision=no""" ) else: cmd_config.append("""--mixed_precision=fp16""" ) if "cpu_offload" in config: cmd_config.append("""--fsdp_offload_params=True""" ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in config: cmd_config.append(f"--fsdp_auto_wrap_policy={policy}" ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" ) elif policy == "SIZE_BASED_WRAP": cmd_config.append("""--fsdp_min_num_params=2000""" ) cmd_config.extend( [ self.test_file_path, f"--output_dir={self.tmpdir}", f"--performance_lower_bound={self.performance_lower_bound}", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case_ , env=os.environ.copy() ) def lowerCamelCase ( self : List[Any] ): snake_case__ : List[Any] = os.path.join(self.test_scripts_folder , """test_checkpointing.py""" ) snake_case__ : Dict = [ """accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp""", 
"""--mixed_precision=fp16""", """--fsdp_transformer_layer_cls_to_wrap=BertLayer""", ] for i, strategy in enumerate(snake_case_ ): snake_case__ : str = cmd.copy() cmd_config.append(f"--fsdp_sharding_strategy={i+1}" ) if strategy != "FULL_SHARD": continue snake_case__ : Tuple = len(snake_case_ ) for state_dict_type in FSDP_STATE_DICT_TYPE: snake_case__ : Tuple = cmd_config[:state_dict_config_index] cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}" ) cmd_config.extend( [ self.test_file_path, f"--output_dir={self.tmpdir}", """--partial_train_epoch=1""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case_ , env=os.environ.copy() ) snake_case__ : Dict = cmd_config[:-1] snake_case__ : Any = os.path.join(self.tmpdir , """epoch_0""" ) cmd_config.extend( [ f"--resume_from_checkpoint={resume_from_checkpoint}", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case_ , env=os.environ.copy() ) def lowerCamelCase ( self : List[str] ): snake_case__ : List[Any] = os.path.join(self.test_scripts_folder , """test_peak_memory_usage.py""" ) snake_case__ : Dict = [ """accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", ] for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items(): snake_case__ : Tuple = cmd.copy() if "fp16" in spec: cmd_config.extend(["""--mixed_precision=fp16"""] ) else: cmd_config.extend(["""--mixed_precision=no"""] ) if "multi_gpu" in spec: continue else: cmd_config.extend(["""--use_fsdp"""] ) for i, strategy in enumerate(snake_case_ ): if strategy.lower() in spec: cmd_config.append(f"--fsdp_sharding_strategy={i+1}" ) break if "cpu_offload" in spec: cmd_config.append("""--fsdp_offload_params=True""" ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in spec: cmd_config.append(f"--fsdp_auto_wrap_policy={policy}" ) break if policy == "TRANSFORMER_BASED_WRAP": 
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" ) elif policy == "SIZE_BASED_WRAP": cmd_config.append("""--fsdp_min_num_params=2000""" ) cmd_config.extend( [ self.test_file_path, f"--output_dir={self.tmpdir}", f"--peak_memory_upper_bound={peak_mem_upper_bound}", f"--n_train={self.n_train}", f"--n_val={self.n_val}", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(snake_case_ , env=os.environ.copy() )
35
'''simple docstring''' import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() __a = logging.get_logger(__name__) def __snake_case( _lowerCAmelCase , _lowerCAmelCase=False ) -> str: snake_case__ : Union[str, Any] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias") ) rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias") ) rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias") ) # projection layer + position embeddings rename_keys.extend( [ ("""cls_token""", """deit.embeddings.cls_token"""), ("""dist_token""", """deit.embeddings.distillation_token"""), ("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""), ("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""), ("""pos_embed""", 
"""deit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ("""pre_logits.fc.weight""", """pooler.dense.weight"""), ("""pre_logits.fc.bias""", """pooler.dense.bias"""), ] ) # if just the base model, we should remove "deit" from all keys that start with "deit" snake_case__ : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ("""norm.weight""", """deit.layernorm.weight"""), ("""norm.bias""", """deit.layernorm.bias"""), ("""head.weight""", """cls_classifier.weight"""), ("""head.bias""", """cls_classifier.bias"""), ("""head_dist.weight""", """distillation_classifier.weight"""), ("""head_dist.bias""", """distillation_classifier.bias"""), ] ) return rename_keys def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> Union[str, Any]: for i in range(config.num_hidden_layers ): if base_model: snake_case__ : Tuple = """""" else: snake_case__ : Dict = """deit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) snake_case__ : Optional[Any] = state_dict.pop(f"blocks.{i}.attn.qkv.weight" ) snake_case__ : Tuple = state_dict.pop(f"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict snake_case__ : Any = in_proj_weight[ : config.hidden_size, : ] snake_case__ : Optional[int] = in_proj_bias[: config.hidden_size] snake_case__ : Any = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] snake_case__ : str = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] snake_case__ : List[str] = in_proj_weight[ -config.hidden_size :, : ] snake_case__ : Tuple = in_proj_bias[-config.hidden_size :] def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int: snake_case__ : str = dct.pop(_lowerCAmelCase 
) snake_case__ : Tuple = val def __snake_case( ) -> Tuple: snake_case__ : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" snake_case__ : Optional[int] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw ) return im @torch.no_grad() def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> str: snake_case__ : Optional[int] = DeiTConfig() # all deit models have fine-tuned heads snake_case__ : Union[str, Any] = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size snake_case__ : int = 1_000 snake_case__ : Any = """huggingface/label-files""" snake_case__ : Optional[Any] = """imagenet-1k-id2label.json""" snake_case__ : Tuple = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) ) snake_case__ : List[Any] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()} snake_case__ : List[Any] = idalabel snake_case__ : List[str] = {v: k for k, v in idalabel.items()} snake_case__ : Tuple = int(deit_name[-6:-4] ) snake_case__ : Optional[Any] = int(deit_name[-3:] ) # size of the architecture if deit_name[9:].startswith("""tiny""" ): snake_case__ : Tuple = 192 snake_case__ : Union[str, Any] = 768 snake_case__ : Tuple = 12 snake_case__ : Union[str, Any] = 3 elif deit_name[9:].startswith("""small""" ): snake_case__ : str = 384 snake_case__ : Any = 1_536 snake_case__ : str = 12 snake_case__ : int = 6 if deit_name[9:].startswith("""base""" ): pass elif deit_name[4:].startswith("""large""" ): snake_case__ : Union[str, Any] = 1_024 snake_case__ : Any = 4_096 snake_case__ : List[Any] = 24 snake_case__ : Tuple = 16 # load original model from timm snake_case__ : List[Any] = timm.create_model(_lowerCAmelCase , pretrained=_lowerCAmelCase ) timm_model.eval() # load state_dict of original model, remove and rename some keys snake_case__ : Optional[Any] = timm_model.state_dict() snake_case__ : Optional[int] = create_rename_keys(_lowerCAmelCase , _lowerCAmelCase ) for src, 
dest in rename_keys: rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # load HuggingFace model snake_case__ : Optional[Any] = DeiTForImageClassificationWithTeacher(_lowerCAmelCase ).eval() model.load_state_dict(_lowerCAmelCase ) # Check outputs on an image, prepared by DeiTImageProcessor snake_case__ : List[Any] = int( (256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 snake_case__ : Optional[Any] = DeiTImageProcessor(size=_lowerCAmelCase , crop_size=config.image_size ) snake_case__ : str = image_processor(images=prepare_img() , return_tensors="""pt""" ) snake_case__ : Optional[Any] = encoding["""pixel_values"""] snake_case__ : Tuple = model(_lowerCAmelCase ) snake_case__ : Optional[int] = timm_model(_lowerCAmelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_lowerCAmelCase , outputs.logits , atol=1e-3 ) Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase ) print(f"Saving model {deit_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_lowerCAmelCase ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( "--deit_name", default="vit_deit_base_distilled_patch16_224", type=str, help="Name of the DeiT timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) __a = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
35
1
'''simple docstring''' import os import sys import tempfile import torch from .state import AcceleratorState from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment def __snake_case( _lowerCAmelCase , _lowerCAmelCase=() , _lowerCAmelCase=None , _lowerCAmelCase="no" , _lowerCAmelCase="29500" ) -> int: snake_case__ : List[str] = False snake_case__ : Tuple = False if any(key.startswith("""KAGGLE""" ) for key in os.environ.keys() ): snake_case__ : List[str] = True elif "IPython" in sys.modules: snake_case__ : int = """google.colab""" in str(sys.modules["""IPython"""].get_ipython() ) try: snake_case__ : Any = PrecisionType(mixed_precision.lower() ) except ValueError: raise ValueError( f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." ) if (in_colab or in_kaggle) and (os.environ.get("""TPU_NAME""" , _lowerCAmelCase ) is not None): # TPU launch import torch_xla.distributed.xla_multiprocessing as xmp if len(AcceleratorState._shared_state ) > 0: raise ValueError( """To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside """ """your training function. Restart your notebook and make sure no cells initializes an """ """`Accelerator`.""" ) if num_processes is None: snake_case__ : Union[str, Any] = 8 snake_case__ : str = PrepareForLaunch(_lowerCAmelCase , distributed_type="""TPU""" ) print(f"Launching a training on {num_processes} TPU cores." ) xmp.spawn(_lowerCAmelCase , args=_lowerCAmelCase , nprocs=_lowerCAmelCase , start_method="""fork""" ) elif in_colab: # No need for a distributed launch otherwise as it's either CPU or one GPU. 
if torch.cuda.is_available(): print("""Launching training on one GPU.""" ) else: print("""Launching training on one CPU.""" ) function(*_lowerCAmelCase ) else: if num_processes is None: raise ValueError( """You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.""" ) if num_processes > 1: # Multi-GPU launch from torch.multiprocessing import start_processes from torch.multiprocessing.spawn import ProcessRaisedException if len(AcceleratorState._shared_state ) > 0: raise ValueError( """To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized """ """inside your training function. Restart your notebook and make sure no cells initializes an """ """`Accelerator`.""" ) if torch.cuda.is_initialized(): raise ValueError( """To launch a multi-GPU training from your notebook, you need to avoid running any instruction """ """using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA """ """function.""" ) # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=_lowerCAmelCase , master_addr="""127.0.01""" , master_port=_lowerCAmelCase , mixed_precision=_lowerCAmelCase ): snake_case__ : Optional[Any] = PrepareForLaunch(_lowerCAmelCase , distributed_type="""MULTI_GPU""" ) print(f"Launching training on {num_processes} GPUs." ) try: start_processes(_lowerCAmelCase , args=_lowerCAmelCase , nprocs=_lowerCAmelCase , start_method="""fork""" ) except ProcessRaisedException as e: if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]: raise RuntimeError( """CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. """ """This likely stems from an outside import causing issues once the `notebook_launcher()` is called. 
""" """Please review your imports and test them when running the `notebook_launcher()` to identify """ """which one is problematic.""" ) from e else: # No need for a distributed launch otherwise as it's either CPU, GPU or MPS. if is_mps_available(): snake_case__ : Tuple = """1""" print("""Launching training on MPS.""" ) elif torch.cuda.is_available(): print("""Launching training on one GPU.""" ) else: print("""Launching training on CPU.""" ) function(*_lowerCAmelCase ) def __snake_case( _lowerCAmelCase , _lowerCAmelCase=() , _lowerCAmelCase=2 ) -> Tuple: from torch.multiprocessing import start_processes with tempfile.NamedTemporaryFile() as tmp_file: # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=_lowerCAmelCase , master_addr="""127.0.01""" , master_port="""29500""" , accelerate_mixed_precision="""no""" , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu="""yes""" , ): snake_case__ : Tuple = PrepareForLaunch(_lowerCAmelCase , debug=_lowerCAmelCase ) start_processes(_lowerCAmelCase , args=_lowerCAmelCase , nprocs=_lowerCAmelCase , start_method="""fork""" )
35
'''simple docstring''' import string from math import logaa def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> int: snake_case__ : List[str] = document.translate( str.maketrans("""""" , """""" , string.punctuation ) ).replace("""\n""" , """""" ) snake_case__ : List[str] = document_without_punctuation.split(""" """ ) # word tokenization return len([word for word in tokenize_document if word.lower() == term.lower()] ) def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> tuple[int, int]: snake_case__ : Dict = corpus.lower().translate( str.maketrans("""""" , """""" , string.punctuation ) ) # strip all punctuation and replace it with '' snake_case__ : Any = corpus_without_punctuation.split("""\n""" ) snake_case__ : int = term.lower() return (len([doc for doc in docs if term in doc] ), len(_lowerCAmelCase )) def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> float: if smoothing: if n == 0: raise ValueError("""log10(0) is undefined.""" ) return round(1 + logaa(n / (1 + df) ) , 3 ) if df == 0: raise ZeroDivisionError("""df must be > 0""" ) elif n == 0: raise ValueError("""log10(0) is undefined.""" ) return round(logaa(n / df ) , 3 ) def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> float: return round(tf * idf , 3 )
35
1
'''simple docstring''' import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() __a = logging.get_logger(__name__) def __snake_case( _lowerCAmelCase , _lowerCAmelCase=False ) -> str: snake_case__ : Union[str, Any] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias") ) rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias") ) rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias") ) # projection layer + position embeddings rename_keys.extend( [ ("""cls_token""", """deit.embeddings.cls_token"""), ("""dist_token""", """deit.embeddings.distillation_token"""), ("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""), ("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""), ("""pos_embed""", 
"""deit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ("""pre_logits.fc.weight""", """pooler.dense.weight"""), ("""pre_logits.fc.bias""", """pooler.dense.bias"""), ] ) # if just the base model, we should remove "deit" from all keys that start with "deit" snake_case__ : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ("""norm.weight""", """deit.layernorm.weight"""), ("""norm.bias""", """deit.layernorm.bias"""), ("""head.weight""", """cls_classifier.weight"""), ("""head.bias""", """cls_classifier.bias"""), ("""head_dist.weight""", """distillation_classifier.weight"""), ("""head_dist.bias""", """distillation_classifier.bias"""), ] ) return rename_keys def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> Union[str, Any]: for i in range(config.num_hidden_layers ): if base_model: snake_case__ : Tuple = """""" else: snake_case__ : Dict = """deit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) snake_case__ : Optional[Any] = state_dict.pop(f"blocks.{i}.attn.qkv.weight" ) snake_case__ : Tuple = state_dict.pop(f"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict snake_case__ : Any = in_proj_weight[ : config.hidden_size, : ] snake_case__ : Optional[int] = in_proj_bias[: config.hidden_size] snake_case__ : Any = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] snake_case__ : str = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] snake_case__ : List[str] = in_proj_weight[ -config.hidden_size :, : ] snake_case__ : Tuple = in_proj_bias[-config.hidden_size :] def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int: snake_case__ : str = dct.pop(_lowerCAmelCase 
) snake_case__ : Tuple = val def __snake_case( ) -> Tuple: snake_case__ : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" snake_case__ : Optional[int] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw ) return im @torch.no_grad() def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> str: snake_case__ : Optional[int] = DeiTConfig() # all deit models have fine-tuned heads snake_case__ : Union[str, Any] = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size snake_case__ : int = 1_000 snake_case__ : Any = """huggingface/label-files""" snake_case__ : Optional[Any] = """imagenet-1k-id2label.json""" snake_case__ : Tuple = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) ) snake_case__ : List[Any] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()} snake_case__ : List[Any] = idalabel snake_case__ : List[str] = {v: k for k, v in idalabel.items()} snake_case__ : Tuple = int(deit_name[-6:-4] ) snake_case__ : Optional[Any] = int(deit_name[-3:] ) # size of the architecture if deit_name[9:].startswith("""tiny""" ): snake_case__ : Tuple = 192 snake_case__ : Union[str, Any] = 768 snake_case__ : Tuple = 12 snake_case__ : Union[str, Any] = 3 elif deit_name[9:].startswith("""small""" ): snake_case__ : str = 384 snake_case__ : Any = 1_536 snake_case__ : str = 12 snake_case__ : int = 6 if deit_name[9:].startswith("""base""" ): pass elif deit_name[4:].startswith("""large""" ): snake_case__ : Union[str, Any] = 1_024 snake_case__ : Any = 4_096 snake_case__ : List[Any] = 24 snake_case__ : Tuple = 16 # load original model from timm snake_case__ : List[Any] = timm.create_model(_lowerCAmelCase , pretrained=_lowerCAmelCase ) timm_model.eval() # load state_dict of original model, remove and rename some keys snake_case__ : Optional[Any] = timm_model.state_dict() snake_case__ : Optional[int] = create_rename_keys(_lowerCAmelCase , _lowerCAmelCase ) for src, 
dest in rename_keys: rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # load HuggingFace model snake_case__ : Optional[Any] = DeiTForImageClassificationWithTeacher(_lowerCAmelCase ).eval() model.load_state_dict(_lowerCAmelCase ) # Check outputs on an image, prepared by DeiTImageProcessor snake_case__ : List[Any] = int( (256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 snake_case__ : Optional[Any] = DeiTImageProcessor(size=_lowerCAmelCase , crop_size=config.image_size ) snake_case__ : str = image_processor(images=prepare_img() , return_tensors="""pt""" ) snake_case__ : Optional[Any] = encoding["""pixel_values"""] snake_case__ : Tuple = model(_lowerCAmelCase ) snake_case__ : Optional[int] = timm_model(_lowerCAmelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_lowerCAmelCase , outputs.logits , atol=1e-3 ) Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase ) print(f"Saving model {deit_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_lowerCAmelCase ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( "--deit_name", default="vit_deit_base_distilled_patch16_224", type=str, help="Name of the DeiT timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) __a = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
35
'''simple docstring'''
# NOTE(review): machine-obfuscated copy of transformers'
# tests/models/resnet/test_modeling_tf_resnet.py.  Assignment targets were
# rewritten to `snake_case__`, parameters to `snake_case_` (duplicate names ->
# SyntaxError), all class attributes to `lowercase` (each shadowing the
# previous), all method names to `lowerCamelCase` (likewise shadowing), and
# all three classes to `UpperCAmelCase_`.  The base-class placeholder `_a` is
# undefined; upstream it was presumably TFModelTesterMixin / PipelineTesterMixin
# (both imported below) — confirm against the upstream file.
from __future__ import annotations

import inspect
import unittest

import numpy as np

from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFResNetForImageClassification, TFResNetModel
    from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class UpperCAmelCase_ :
    """Model-tester helper: builds a tiny ResNetConfig plus random pixel inputs
    for the test case below (upstream: TFResNetModelTester)."""

    def __init__( self : int , snake_case_ : Tuple , snake_case_ : List[str]=3 , snake_case_ : Tuple=32 , snake_case_ : List[Any]=3 , snake_case_ : List[str]=10 , snake_case_ : List[str]=[10, 20, 30, 40] , snake_case_ : Tuple=[1, 1, 2, 1] , snake_case_ : Tuple=True , snake_case_ : str=True , snake_case_ : int="relu" , snake_case_ : List[Any]=3 , snake_case_ : str=None , ):
        # NOTE(review): the locals below were `self.<attr>` writes upstream;
        # later methods read self.batch_size, self.num_channels, etc.
        snake_case__ : List[Any] = parent
        snake_case__ : List[Any] = batch_size
        snake_case__ : int = image_size
        snake_case__ : List[Any] = num_channels
        snake_case__ : Optional[Any] = embeddings_size
        snake_case__ : Optional[int] = hidden_sizes
        snake_case__ : Tuple = depths
        snake_case__ : Any = is_training
        snake_case__ : Optional[int] = use_labels
        snake_case__ : Optional[int] = hidden_act
        snake_case__ : Optional[int] = num_labels
        snake_case__ : int = scope
        snake_case__ : Tuple = len(snake_case_ )

    def lowerCamelCase ( self : Any ):
        # upstream: prepare_config_and_inputs — random pixel values (+ labels)
        snake_case__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        snake_case__ : Union[str, Any] = None
        if self.use_labels:
            snake_case__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
        snake_case__ : List[str] = self.get_config()
        return config, pixel_values, labels

    def lowerCamelCase ( self : int ):
        # upstream: get_config
        return ResNetConfig(
            num_channels=self.num_channels ,
            embeddings_size=self.embeddings_size ,
            hidden_sizes=self.hidden_sizes ,
            depths=self.depths ,
            hidden_act=self.hidden_act ,
            num_labels=self.num_labels ,
            image_size=self.image_size ,
        )

    def lowerCamelCase ( self : Tuple , snake_case_ : Tuple , snake_case_ : List[Any] , snake_case_ : Optional[int] ):
        # upstream: create_and_check_model
        snake_case__ : Optional[Any] = TFResNetModel(config=snake_case_ )
        snake_case__ : int = model(snake_case_ )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape ,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,
        )

    def lowerCamelCase ( self : List[str] , snake_case_ : List[str] , snake_case_ : str , snake_case_ : Union[str, Any] ):
        # upstream: create_and_check_for_image_classification
        snake_case__ : str = self.num_labels
        snake_case__ : Optional[int] = TFResNetForImageClassification(snake_case_ )
        snake_case__ : Tuple = model(snake_case_ , labels=snake_case_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def lowerCamelCase ( self : Tuple ):
        # upstream: prepare_config_and_inputs_for_common
        snake_case__ : List[Any] = self.prepare_config_and_inputs()
        snake_case__ , snake_case__ , snake_case__ : str = config_and_inputs
        snake_case__ : int = {"""pixel_values""": pixel_values}
        return config, inputs_dict


@require_tf
class UpperCAmelCase_ ( _a , _a , unittest.TestCase ):
    """Main TFResNet test case (upstream: TFResNetModelTest)."""

    lowercase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    lowercase = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    # upstream flags: test_pruning / test_resize_embeddings / test_head_masking /
    # has_attentions / test_onnx-style switches, all disabled for ResNet
    lowercase = False
    lowercase = False
    lowercase = False
    lowercase = False
    lowercase = False

    def lowerCamelCase ( self : Optional[int] ):
        # upstream: setUp
        snake_case__ : Tuple = TFResNetModelTester(self )
        snake_case__ : List[str] = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )

    def lowerCamelCase ( self : Dict ):
        # upstream: test_config — exercises the full ConfigTester suite
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def lowerCamelCase ( self : str ):
        # upstream: create_and_test_config_common_properties (intentionally empty)
        return

    @unittest.skip(reason="""ResNet does not use inputs_embeds""" )
    def lowerCamelCase ( self : int ):
        pass

    @unittest.skip(reason="""ResNet does not support input and output embeddings""" )
    def lowerCamelCase ( self : List[Any] ):
        pass

    def lowerCamelCase ( self : List[Any] ):
        # upstream: test_forward_signature — first positional arg must be pixel_values
        snake_case__ , snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case__ : Dict = model_class(snake_case_ )
            snake_case__ : Optional[Any] = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case__ : Union[str, Any] = [*signature.parameters.keys()]
            snake_case__ : Optional[int] = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , snake_case_ )

    def lowerCamelCase ( self : Union[str, Any] ):
        # upstream: test_model
        snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case_ )

    def lowerCamelCase ( self : List[str] ):
        # upstream: test_hidden_states_output
        def check_hidden_states_output(snake_case_ : Any , snake_case_ : Any , snake_case_ : List[str] ):
            snake_case__ : List[Any] = model_class(snake_case_ )
            snake_case__ : Dict = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
            snake_case__ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            snake_case__ : List[Any] = self.model_tester.num_stages
            self.assertEqual(len(snake_case_ ) , expected_num_stages + 1 )
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) ,
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,
            )

        snake_case__ , snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case__ : List[Any] = ["""basic""", """bottleneck"""]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                snake_case__ : Dict = layer_type
                snake_case__ : Optional[int] = True
                check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                snake_case__ : List[Any] = True
                check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )

    def lowerCamelCase ( self : Optional[Any] ):
        # upstream: test_for_image_classification
        snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*snake_case_ )

    @slow
    def lowerCamelCase ( self : Optional[Any] ):
        # upstream: test_model_from_pretrained — smoke-load the first hub checkpoint
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case__ : str = TFResNetModel.from_pretrained(snake_case_ )
            self.assertIsNotNone(snake_case_ )


def __snake_case( ) -> Optional[int]:
    """Load the local COCO cats fixture image used by the integration test."""
    snake_case__ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image


@require_tf
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
    """Slow integration test pinning exact logits of the pretrained checkpoint
    (upstream: TFResNetModelIntegrationTest)."""

    @cached_property
    def lowerCamelCase ( self : List[Any] ):
        # upstream: default_image_processor
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )

    @slow
    def lowerCamelCase ( self : Optional[int] ):
        # upstream: test_inference_image_classification_head
        snake_case__ : List[str] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        snake_case__ : List[Any] = self.default_image_processor
        snake_case__ : List[Any] = prepare_img()
        snake_case__ : List[str] = image_processor(images=snake_case_ , return_tensors="""tf""" )
        # forward pass
        snake_case__ : Optional[Any] = model(**snake_case_ )
        # verify the logits
        snake_case__ : Union[str, Any] = tf.TensorShape((1, 1_000) )
        self.assertEqual(outputs.logits.shape , snake_case_ )
        snake_case__ : List[str] = tf.constant([-11.1069, -9.7877, -8.3777] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , snake_case_ , atol=1E-4 ) )
35
1
'''simple docstring'''
import copy
import os

import cva
import numpy as np
from matplotlib import pyplot as plt


class UpperCAmelCase_ :
    """Histogram-equalization ("constant stretch") helper over a grayscale image.

    NOTE(review): obfuscated code (`cva` is cv2).  The `snake_case__` locals in
    ``__init__`` were clearly ``self.<attr>`` writes upstream — later methods
    read self.img, self.original_image, self.last_list, self.rem, self.L,
    self.sk, self.k, self.number_of_rows and self.number_of_cols, none of
    which are otherwise ever set.  The three methods also all share the
    mangled name ``lowerCamelCase``, so each definition shadows the previous.
    """

    def __init__( self : Dict ):
        # evident upstream attrs, in order: img, original_image, last_list,
        # rem, L (=256 gray levels), sk, k, number_of_rows, number_of_cols
        snake_case__ : Optional[int] = """"""
        snake_case__ : List[Any] = """"""
        snake_case__ : List[str] = []
        snake_case__ : str = 0
        snake_case__ : Union[str, Any] = 256
        snake_case__ : Union[str, Any] = 0
        snake_case__ : Any = 0
        snake_case__ : List[Any] = 0
        snake_case__ : Optional[Any] = 0

    def lowerCamelCase ( self : Any , snake_case_ : Union[str, Any] ):
        # upstream: stretch(input_path) — builds the cumulative-histogram
        # remap table, applies it pixel-wise, writes output_data/output.jpg
        snake_case__ : Dict = cva.imread(snake_case_ , 0 )
        snake_case__ : str = copy.deepcopy(self.img )
        snake_case__ , snake_case__ , snake_case__ : List[str] = plt.hist(self.img.ravel() , 256 , [0, 256] , label="""x""" )
        snake_case__ : str = np.sum(snake_case_ )
        for i in range(len(snake_case_ ) ):
            snake_case__ : str = x[i] / self.k
            self.sk += prk
            snake_case__ : int = (self.L - 1) * self.sk
            if self.rem != 0:
                # NOTE(review): `last % last` is always 0 for nonzero `last`
                # (and ZeroDivisionError for integer 0); the intent was
                # presumably the fractional part, `last % 1`, feeding the
                # >= 0.5 rounding below — confirm against upstream.
                snake_case__ : List[Any] = int(last % last )
            snake_case__ : Tuple = int(last + 1 if self.rem >= 0.5 else last )
            self.last_list.append(snake_case_ )
        snake_case__ : Tuple = int(np.ma.count(self.img ) / self.img[1].size )
        snake_case__ : int = self.img[1].size
        for i in range(self.number_of_cols ):
            for j in range(self.number_of_rows ):
                snake_case__ : Tuple = self.img[j][i]
                if num != self.last_list[num]:
                    snake_case__ : str = self.last_list[num]
        cva.imwrite("""output_data/output.jpg""" , self.img )

    def lowerCamelCase ( self : Optional[int] ):
        # upstream: plot_histogram
        plt.hist(self.img.ravel() , 256 , [0, 256] )

    def lowerCamelCase ( self : Optional[int] ):
        # upstream: show_image — display both images for 5 s, then tear down
        cva.imshow("""Output-Image""" , self.img )
        cva.imshow("""Input-Image""" , self.original_image )
        cva.waitKey(5_000 )
        cva.destroyAllWindows()


if __name__ == "__main__":
    # NOTE(review): `ConstantStretch` is undefined (the class above was renamed
    # to `UpperCAmelCase_`), `file_path`/`stretcher` are never bound (both
    # assignments target `__a`), and `os.path.basename(__file__)` should almost
    # certainly be `os.path.dirname(__file__)` — verify before use.
    __a = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    __a = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
35
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


__a = logging.get_logger(__name__)

# NOTE(review): both module-level assignments target `__a`, so the pretrained
# config-archive map shadows the logger — an obfuscation artifact.
__a = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class UpperCAmelCase_ ( _a ):
    """Configuration for the GLPN depth-estimation model (upstream: GLPNConfig).

    NOTE(review): base class placeholder `_a` is undefined; upstream this
    extends PretrainedConfig (imported above).  Every `__init__` parameter is
    mangled to `snake_case_` (duplicate names -> SyntaxError) and the body's
    locals were `self.<attr>` writes upstream — as-is, nothing is stored on
    the instance.  Mutable list defaults are also an upstream smell.
    """

    lowercase = "glpn"

    def __init__( self : Optional[Any] , snake_case_ : List[str]=3 , snake_case_ : Dict=4 , snake_case_ : List[Any]=[2, 2, 2, 2] , snake_case_ : int=[8, 4, 2, 1] , snake_case_ : List[str]=[32, 64, 160, 256] , snake_case_ : Tuple=[7, 3, 3, 3] , snake_case_ : List[Any]=[4, 2, 2, 2] , snake_case_ : Tuple=[1, 2, 5, 8] , snake_case_ : List[str]=[4, 4, 4, 4] , snake_case_ : Optional[int]="gelu" , snake_case_ : Dict=0.0 , snake_case_ : Union[str, Any]=0.0 , snake_case_ : List[Any]=0.02 , snake_case_ : Tuple=0.1 , snake_case_ : Any=1E-6 , snake_case_ : Dict=64 , snake_case_ : Tuple=10 , snake_case_ : List[Any]=-1 , **snake_case_ : Optional[Any] , ):
        super().__init__(**snake_case_ )
        # evident upstream attribute names, from the RHS identifiers:
        snake_case__ : Optional[Any] = num_channels
        snake_case__ : Dict = num_encoder_blocks
        snake_case__ : Tuple = depths
        snake_case__ : Union[str, Any] = sr_ratios
        snake_case__ : Tuple = hidden_sizes
        snake_case__ : Optional[Any] = patch_sizes
        snake_case__ : int = strides
        snake_case__ : List[Any] = mlp_ratios
        snake_case__ : Optional[int] = num_attention_heads
        snake_case__ : Dict = hidden_act
        snake_case__ : int = hidden_dropout_prob
        snake_case__ : Optional[Any] = attention_probs_dropout_prob
        snake_case__ : str = initializer_range
        snake_case__ : List[str] = drop_path_rate
        snake_case__ : int = layer_norm_eps
        snake_case__ : Tuple = decoder_hidden_size
        snake_case__ : List[Any] = max_depth
        snake_case__ : Dict = head_in_index
35
1
'''simple docstring'''
from typing import List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


__a = logging.get_logger(__name__)

# NOTE(review): both module-level assignments target `__a` — the archive map
# shadows the logger (obfuscation artifact).
__a = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}


class UpperCAmelCase_ ( _a ):
    """Configuration for the Autoformer time-series model (upstream: AutoformerConfig).

    NOTE(review): `_a` is an undefined placeholder (upstream: PretrainedConfig,
    imported above).  All `__init__` parameters are mangled to `snake_case_`
    (duplicate names -> SyntaxError) and the body's `snake_case__` locals were
    `self.<attr>` writes upstream — the `_number_of_features` property below
    reads self.embedding_dimension / self.num_dynamic_real_features / etc.,
    which are otherwise never set.
    """

    lowercase = "autoformer"
    lowercase = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__( self : List[Any] , snake_case_ : Optional[int] = None , snake_case_ : Optional[int] = None , snake_case_ : str = "student_t" , snake_case_ : str = "nll" , snake_case_ : int = 1 , snake_case_ : List[int] = [1, 2, 3, 4, 5, 6, 7] , snake_case_ : bool = True , snake_case_ : int = 0 , snake_case_ : int = 0 , snake_case_ : int = 0 , snake_case_ : int = 0 , snake_case_ : Optional[List[int]] = None , snake_case_ : Optional[List[int]] = None , snake_case_ : int = 64 , snake_case_ : int = 2 , snake_case_ : int = 2 , snake_case_ : int = 2 , snake_case_ : int = 2 , snake_case_ : int = 32 , snake_case_ : int = 32 , snake_case_ : str = "gelu" , snake_case_ : float = 0.1 , snake_case_ : float = 0.1 , snake_case_ : float = 0.1 , snake_case_ : float = 0.1 , snake_case_ : float = 0.1 , snake_case_ : int = 100 , snake_case_ : float = 0.02 , snake_case_ : bool = True , snake_case_ : Optional[Any]=True , snake_case_ : int = 10 , snake_case_ : int = 25 , snake_case_ : int = 3 , **snake_case_ : Optional[int] , ):
        # time series specific configuration
        snake_case__ : Optional[Any] = prediction_length
        # context_length defaults to prediction_length when not given
        snake_case__ : List[str] = context_length if context_length is not None else prediction_length
        snake_case__ : List[str] = distribution_output
        snake_case__ : Dict = loss
        snake_case__ : List[str] = input_size
        snake_case__ : Any = num_time_features
        snake_case__ : Any = lags_sequence
        snake_case__ : List[Any] = scaling
        snake_case__ : List[Any] = num_dynamic_real_features
        snake_case__ : Union[str, Any] = num_static_real_features
        snake_case__ : List[str] = num_static_categorical_features
        # cardinality / embedding_dimension must match the number of static
        # categorical features when any are declared
        if cardinality is not None and num_static_categorical_features > 0:
            if len(snake_case_ ) != num_static_categorical_features:
                raise ValueError(
                    """The cardinality should be a list of the same length as `num_static_categorical_features`""" )
            snake_case__ : Union[str, Any] = cardinality
        else:
            snake_case__ : Union[str, Any] = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(snake_case_ ) != num_static_categorical_features:
                raise ValueError(
                    """The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
            snake_case__ : Union[str, Any] = embedding_dimension
        else:
            # heuristic embedding size: min(50, (cardinality + 1) // 2)
            snake_case__ : List[Any] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        snake_case__ : List[str] = num_parallel_samples
        # Transformer architecture configuration
        snake_case__ : str = input_size * len(self.lags_sequence ) + self._number_of_features
        snake_case__ : Optional[Any] = d_model
        snake_case__ : Optional[Any] = encoder_attention_heads
        snake_case__ : str = decoder_attention_heads
        snake_case__ : Any = encoder_ffn_dim
        snake_case__ : int = decoder_ffn_dim
        snake_case__ : Tuple = encoder_layers
        snake_case__ : Dict = decoder_layers
        snake_case__ : List[str] = dropout
        snake_case__ : Optional[Any] = attention_dropout
        snake_case__ : List[Any] = activation_dropout
        snake_case__ : Tuple = encoder_layerdrop
        snake_case__ : Tuple = decoder_layerdrop
        snake_case__ : int = activation_function
        snake_case__ : str = init_std
        snake_case__ : str = use_cache
        # Autoformer
        snake_case__ : Optional[Any] = label_length
        snake_case__ : Dict = moving_average
        snake_case__ : Tuple = autocorrelation_factor
        super().__init__(is_encoder_decoder=snake_case_ , **snake_case_ )

    @property
    def lowerCamelCase ( self : List[str] ):
        # upstream: _number_of_features — width of the per-step feature vector
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
35
'''simple docstring'''
# NOTE(review): machine-obfuscated copy of transformers'
# tokenization_roformer_fast.py.  Module-level constants all assign to `__a`
# (each shadowing the previous), the class is renamed `UpperCAmelCase_`, its
# base placeholder `_a` is undefined (upstream: PreTrainedTokenizerFast,
# imported below), and local assignment targets are mangled to `snake_case__`.
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer


__a = logging.get_logger(__name__)

__a = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

__a = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

__a = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

__a = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}


class UpperCAmelCase_ ( _a ):
    """Fast RoFormer tokenizer backed by the `tokenizers` library, with a
    custom Jieba pre-tokenizer for Chinese text (upstream: RoFormerTokenizerFast).

    NOTE(review): the class-attribute names mangled to `lowercase` shadow one
    another; upstream they were vocab_files_names / pretrained_vocab_files_map /
    max_model_input_sizes / pretrained_init_configuration / slow_tokenizer_class.
    """

    lowercase = VOCAB_FILES_NAMES
    lowercase = PRETRAINED_VOCAB_FILES_MAP
    lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowercase = PRETRAINED_INIT_CONFIGURATION
    lowercase = RoFormerTokenizer

    def __init__( self : List[Any] , snake_case_ : List[str]=None , snake_case_ : Dict=None , snake_case_ : Any=True , snake_case_ : str="[UNK]" , snake_case_ : List[str]="[SEP]" , snake_case_ : Optional[Any]="[PAD]" , snake_case_ : Union[str, Any]="[CLS]" , snake_case_ : Union[str, Any]="[MASK]" , snake_case_ : List[Any]=True , snake_case_ : Optional[Any]=None , **snake_case_ : Tuple , ):
        super().__init__(
            snake_case_ ,
            tokenizer_file=snake_case_ ,
            do_lower_case=snake_case_ ,
            unk_token=snake_case_ ,
            sep_token=snake_case_ ,
            pad_token=snake_case_ ,
            cls_token=snake_case_ ,
            mask_token=snake_case_ ,
            tokenize_chinese_chars=snake_case_ ,
            strip_accents=snake_case_ ,
            **snake_case_ ,
        )
        # rebuild the backend normalizer if the saved lowercase/strip_accents
        # settings disagree with the requested ones
        snake_case__ : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            pre_tok_state.get("""lowercase""" , snake_case_ ) != do_lower_case
            or pre_tok_state.get("""strip_accents""" , snake_case_ ) != strip_accents
        ):
            snake_case__ : str = getattr(snake_case_ , pre_tok_state.pop("""type""" ) )
            snake_case__ : Optional[int] = do_lower_case
            snake_case__ : Union[str, Any] = strip_accents
            snake_case__ : Union[str, Any] = pre_tok_class(**snake_case_ )
        snake_case__ : str = do_lower_case

    def __getstate__( self : int ):
        # the custom Jieba pre-tokenizer cannot be pickled; swap in a plain
        # BertPreTokenizer for serialization
        snake_case__ : List[Any] = self.__dict__.copy()
        snake_case__ : str = BertPreTokenizer()
        return state

    def __setstate__( self : Dict , snake_case_ : Dict ):
        # restore state and re-attach the Jieba pre-tokenizer over the vocab
        snake_case__ : List[Any] = d
        snake_case__ : Union[str, Any] = self.__dict__["""_tokenizer"""].get_vocab()
        snake_case__ : List[Any] = PreTokenizer.custom(JiebaPreTokenizer(snake_case_ ) )

    def lowerCamelCase ( self : str , snake_case_ : Optional[Any] , snake_case_ : List[str]=None ):
        # upstream: build_inputs_with_special_tokens —
        # [CLS] A [SEP] ( B [SEP] ).
        # NOTE(review): `output` is unbound (first assignment mangled to
        # `snake_case__`) and `token_ids_a` stands in for both sequences; the
        # second use was presumably `token_ids_1` upstream.
        snake_case__ : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output

    def lowerCamelCase ( self : str , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
        # upstream: create_token_type_ids_from_sequences — 0s for the first
        # segment (incl. [CLS]/[SEP]), 1s for the second.
        # NOTE(review): in the None branch `token_ids_a` is presumably the
        # mangled stand-in for `token_ids_0` — as written it would be None.
        snake_case__ : int = [self.sep_token_id]
        snake_case__ : str = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def lowerCamelCase ( self : Dict , snake_case_ : str , snake_case_ : Optional[str] = None ):
        # upstream: save_vocabulary — delegate to the backend model
        snake_case__ : Union[str, Any] = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
        return tuple(snake_case_ )

    def lowerCamelCase ( self : Dict , snake_case_ : List[str] , snake_case_ : Tuple=None , snake_case_ : List[str]=None , snake_case_ : Union[str, Any]=False , **snake_case_ : Tuple , ):
        # upstream: save_pretrained — temporarily swap in the picklable
        # BertPreTokenizer while saving
        snake_case__ : Optional[Any] = BertPreTokenizer()
        return super().save_pretrained(snake_case_ , snake_case_ , snake_case_ , snake_case_ , **snake_case_ )
35
1
'''simple docstring''' import contextlib import csv import json import os import sqlitea import tarfile import textwrap import zipfile import pyarrow as pa import pyarrow.parquet as pq import pytest import datasets import datasets.config @pytest.fixture(scope="""session""" ) def __snake_case( ) -> Tuple: snake_case__ : Union[str, Any] = 10 snake_case__ : List[str] = datasets.Features( { """tokens""": datasets.Sequence(datasets.Value("""string""" ) ), """labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ), """answers""": datasets.Sequence( { """text""": datasets.Value("""string""" ), """answer_start""": datasets.Value("""int32""" ), } ), """id""": datasets.Value("""int64""" ), } ) snake_case__ : Dict = datasets.Dataset.from_dict( { """tokens""": [["""foo"""] * 5] * n, """labels""": [[1] * 5] * n, """answers""": [{"""answer_start""": [97], """text""": ["""1976"""]}] * 10, """id""": list(range(_lowerCAmelCase ) ), } , features=_lowerCAmelCase , ) return dataset @pytest.fixture(scope="""session""" ) def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]: snake_case__ : Any = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" ) dataset.map(cache_file_name=_lowerCAmelCase ) return filename # FILE_CONTENT + files __a = "\\n Text data.\n Second line of data." 
# NOTE(review): continuation of the machine-renamed conftest — same caveats as
# above: every fixture is `__snake_case`, multi-argument signatures repeat
# `_lowerCAmelCase` (SyntaxError), bodies reference the original names, and
# `bza`/`lza`/`pyazr`/`intaa`/`floataa` are mangled `bz2`/`lz4`/`py7zr`/
# `int64`/`float64`.  Transcribed as-is; multi-line string layout was lost by
# flattening and is reconstructed — TODO confirm against upstream.


# Fixture: plain text file "file.txt" containing FILE_CONTENT.
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase ) -> Any:
    snake_case__ : List[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt"""
    snake_case__ : Optional[Any] = FILE_CONTENT
    with open(_lowerCAmelCase , """w""" ) as f:
        f.write(_lowerCAmelCase )
    return filename


# Fixture: bz2-compressed text file.
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase ) -> Dict:
    import bza

    snake_case__ : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2"""
    snake_case__ : Optional[Any] = bytes(_lowerCAmelCase , """utf-8""" )
    with bza.open(_lowerCAmelCase , """wb""" ) as f:
        f.write(_lowerCAmelCase )
    return path


# Fixture: gzip-compressed text file.
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase ) -> int:
    import gzip

    snake_case__ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
    snake_case__ : Dict = bytes(_lowerCAmelCase , """utf-8""" )
    with gzip.open(_lowerCAmelCase , """wb""" ) as f:
        f.write(_lowerCAmelCase )
    return path


# Fixture: lz4-compressed text file (only when lz4 is installed).
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase ) -> Optional[Any]:
    if datasets.config.LZ4_AVAILABLE:
        import lza.frame

        snake_case__ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4"""
        snake_case__ : Tuple = bytes(_lowerCAmelCase , """utf-8""" )
        with lza.frame.open(_lowerCAmelCase , """wb""" ) as f:
            f.write(_lowerCAmelCase )
        return path


# Fixture: 7z archive containing the text file (only when py7zr is installed).
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:  # NOTE(review): duplicate arg = SyntaxError
    if datasets.config.PY7ZR_AVAILABLE:
        import pyazr

        snake_case__ : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
        with pyazr.SevenZipFile(_lowerCAmelCase , """w""" ) as archive:
            archive.write(_lowerCAmelCase , arcname=os.path.basename(_lowerCAmelCase ) )
        return path


# Fixture: tar archive containing the text file.
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
    import tarfile

    snake_case__ : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
    with tarfile.TarFile(_lowerCAmelCase , """w""" ) as f:
        f.add(_lowerCAmelCase , arcname=os.path.basename(_lowerCAmelCase ) )
    return path


# Fixture: xz (lzma) compressed text file.
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase ) -> Dict:
    import lzma

    snake_case__ : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
    snake_case__ : Any = bytes(_lowerCAmelCase , """utf-8""" )
    with lzma.open(_lowerCAmelCase , """wb""" ) as f:
        f.write(_lowerCAmelCase )
    return path


# Fixture: zip archive containing the text file.
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
    import zipfile

    snake_case__ : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
    with zipfile.ZipFile(_lowerCAmelCase , """w""" ) as f:
        f.write(_lowerCAmelCase , arcname=os.path.basename(_lowerCAmelCase ) )
    return path


# Fixture: zstandard-compressed text file (only when zstandard is installed).
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase ) -> Dict:
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        snake_case__ : List[str] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
        snake_case__ : Optional[int] = bytes(_lowerCAmelCase , """utf-8""" )
        with zstd.open(_lowerCAmelCase , """wb""" ) as f:
            f.write(_lowerCAmelCase )
        return path


# Fixture: small TMX translation-memory XML file.
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase ) -> Optional[Any]:
    snake_case__ : List[Any] = tmp_path_factory.mktemp("""data""" ) / """file.xml"""
    # NOTE(review): original line breaks of this literal were lost; layout reconstructed.
    snake_case__ : Any = textwrap.dedent(
        """\
    <?xml version="1.0" encoding="UTF-8" ?>
    <tmx version="1.4">
      <header segtype="sentence" srclang="ca" />
      <body>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
          <tuv xml:lang="en"><seg>Content 1</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
          <tuv xml:lang="en"><seg>Content 2</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
          <tuv xml:lang="en"><seg>Content 3</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
          <tuv xml:lang="en"><seg>Content 4</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
          <tuv xml:lang="en"><seg>Content 5</seg></tuv>
        </tu>
      </body>
    </tmx>"""
    )
    with open(_lowerCAmelCase , """w""" ) as f:
        f.write(_lowerCAmelCase )
    return filename


# Tabular sample data shared by the csv/json/parquet/sql fixtures below.
__a = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
__a = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
__a = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
__a = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
__a = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]


# Fixture: the dict-of-lists sample data.
@pytest.fixture(scope="""session""" )
def __snake_case( ) -> int:
    return DATA_DICT_OF_LISTS


# Fixture: sample data written to an .arrow file.
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase ) -> List[str]:
    snake_case__ : Union[str, Any] = datasets.Dataset.from_dict(_lowerCAmelCase )
    snake_case__ : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
    dataset.map(cache_file_name=_lowerCAmelCase )
    return path


# Fixture: sample data written to a SQLite database.
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase ) -> List[str]:
    snake_case__ : List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" )
    with contextlib.closing(sqlitea.connect(_lowerCAmelCase ) ) as con:
        snake_case__ : List[str] = con.cursor()
        cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
        for item in DATA:
            cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) )
        con.commit()
    return path


# Fixture: sample data written to dataset.csv.
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase ) -> Optional[Any]:
    snake_case__ : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
    with open(_lowerCAmelCase , """w""" , newline="""""" ) as f:
        snake_case__ : List[str] = csv.DictWriter(_lowerCAmelCase , fieldnames=["""col_1""", """col_2""", """col_3"""] )
        writer.writeheader()
        for item in DATA:
            writer.writerow(_lowerCAmelCase )
    return path


# Fixture: second csv file (dataset2.csv).
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase ) -> Dict:
    snake_case__ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
    with open(_lowerCAmelCase , """w""" , newline="""""" ) as f:
        snake_case__ : Optional[Any] = csv.DictWriter(_lowerCAmelCase , fieldnames=["""col_1""", """col_2""", """col_3"""] )
        writer.writeheader()
        for item in DATA:
            writer.writerow(_lowerCAmelCase )
    return path


# Fixture: bz2-compressed copy of the csv file.
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]:
    import bza

    snake_case__ : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
    with open(_lowerCAmelCase , """rb""" ) as f:
        snake_case__ : List[Any] = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bza.open(_lowerCAmelCase , """wb""" ) as f:
        f.write(_lowerCAmelCase )
    return path


# Fixture: zip containing both csv files.
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
    snake_case__ : Any = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
    with zipfile.ZipFile(_lowerCAmelCase , """w""" ) as f:
        f.write(_lowerCAmelCase , arcname=os.path.basename(_lowerCAmelCase ) )
        f.write(_lowerCAmelCase , arcname=os.path.basename(_lowerCAmelCase ) )
    return path


# Fixture: zip with the csv members renamed to an uppercase .CSV extension.
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Any:
    snake_case__ : Dict = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
    with zipfile.ZipFile(_lowerCAmelCase , """w""" ) as f:
        f.write(_lowerCAmelCase , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) )
        f.write(_lowerCAmelCase , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) )
    return path


# Fixture: zip with the csv files nested under main_dir/.
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
    snake_case__ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
    with zipfile.ZipFile(_lowerCAmelCase , """w""" ) as f:
        f.write(_lowerCAmelCase , arcname=os.path.join("""main_dir""" , os.path.basename(_lowerCAmelCase ) ) )
        f.write(_lowerCAmelCase , arcname=os.path.join("""main_dir""" , os.path.basename(_lowerCAmelCase ) ) )
    return path


# Fixture: sample data written to a parquet file.
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase ) -> Tuple:
    snake_case__ : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
    snake_case__ : List[Any] = pa.schema(
        {
            """col_1""": pa.string(),
            """col_2""": pa.intaa(),
            """col_3""": pa.floataa(),
        }
    )
    with open(_lowerCAmelCase , """wb""" ) as f:
        snake_case__ : int = pq.ParquetWriter(_lowerCAmelCase , schema=_lowerCAmelCase )
        snake_case__ : List[str] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(_lowerCAmelCase ) )] for k in DATA[0]} , schema=_lowerCAmelCase )
        writer.write_table(_lowerCAmelCase )
        writer.close()
    return path


# Fixture: {"data": DATA} as a json file.
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase ) -> str:
    snake_case__ : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
    snake_case__ : List[str] = {"""data""": DATA}
    with open(_lowerCAmelCase , """w""" ) as f:
        json.dump(_lowerCAmelCase , _lowerCAmelCase )
    return path


# Fixture: {"data": DATA_DICT_OF_LISTS} as a json file.
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase ) -> Union[str, Any]:
    snake_case__ : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
    snake_case__ : List[str] = {"""data""": DATA_DICT_OF_LISTS}
    with open(_lowerCAmelCase , """w""" ) as f:
        json.dump(_lowerCAmelCase , _lowerCAmelCase )
    return path


# Fixture: DATA as json-lines.
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase ) -> List[Any]:
    snake_case__ : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
    with open(_lowerCAmelCase , """w""" ) as f:
        for item in DATA:
            f.write(json.dumps(_lowerCAmelCase ) + """\n""" )
    return path


# Fixture: second json-lines file.
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase ) -> Optional[Any]:
    snake_case__ : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
    with open(_lowerCAmelCase , """w""" ) as f:
        for item in DATA:
            f.write(json.dumps(_lowerCAmelCase ) + """\n""" )
    return path


# Fixture: DATA_312 as json-lines.
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase ) -> Optional[Any]:
    snake_case__ : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
    with open(_lowerCAmelCase , """w""" ) as f:
        for item in DATA_312:
            f.write(json.dumps(_lowerCAmelCase ) + """\n""" )
    return path


# Fixture: DATA_STR as json-lines.
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase ) -> int:
    snake_case__ : Any = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
    with open(_lowerCAmelCase , """w""" ) as f:
        for item in DATA_STR:
            f.write(json.dumps(_lowerCAmelCase ) + """\n""" )
    return path


# Fixture: gzip-compressed copy of the text file.
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Any:
    import gzip

    snake_case__ : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
    with open(_lowerCAmelCase , """rb""" ) as orig_file:
        with gzip.open(_lowerCAmelCase , """wb""" ) as zipped_file:
            zipped_file.writelines(_lowerCAmelCase )
    return path


# Fixture: gzip-compressed copy of the jsonl file.
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
    import gzip

    snake_case__ : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
    with open(_lowerCAmelCase , """rb""" ) as orig_file:
        with gzip.open(_lowerCAmelCase , """wb""" ) as zipped_file:
            zipped_file.writelines(_lowerCAmelCase )
    return path


# Fixture: zip containing both jsonl files.
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Any:
    snake_case__ : Dict = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
    with zipfile.ZipFile(_lowerCAmelCase , """w""" ) as f:
        f.write(_lowerCAmelCase , arcname=os.path.basename(_lowerCAmelCase ) )
        f.write(_lowerCAmelCase , arcname=os.path.basename(_lowerCAmelCase ) )
    return path


# Fixture: zip with the jsonl file nested under nested/.
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]:
    snake_case__ : Dict = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
    with zipfile.ZipFile(_lowerCAmelCase , """w""" ) as f:
        f.write(_lowerCAmelCase , arcname=os.path.join("""nested""" , os.path.basename(_lowerCAmelCase ) ) )
    return path


# Fixture: zip with the jsonl files nested under main_dir/.
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int:
    snake_case__ : List[str] = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
    with zipfile.ZipFile(_lowerCAmelCase , """w""" ) as f:
        f.write(_lowerCAmelCase , arcname=os.path.join("""main_dir""" , os.path.basename(_lowerCAmelCase ) ) )
        f.write(_lowerCAmelCase , arcname=os.path.join("""main_dir""" , os.path.basename(_lowerCAmelCase ) ) )
    return path


# Fixture: tar containing both jsonl files.
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
    snake_case__ : List[str] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
    with tarfile.TarFile(_lowerCAmelCase , """w""" ) as f:
        f.add(_lowerCAmelCase , arcname=os.path.basename(_lowerCAmelCase ) )
        f.add(_lowerCAmelCase , arcname=os.path.basename(_lowerCAmelCase ) )
    return path


# Fixture: tar with the jsonl file nested under nested/.
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int:
    snake_case__ : int = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
    with tarfile.TarFile(_lowerCAmelCase , """w""" ) as f:
        f.add(_lowerCAmelCase , arcname=os.path.join("""nested""" , os.path.basename(_lowerCAmelCase ) ) )
    return path


# Fixture: plain text file with one value per line.
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase ) -> List[Any]:
    snake_case__ : Union[str, Any] = ["""0""", """1""", """2""", """3"""]
    snake_case__ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
    with open(_lowerCAmelCase , """w""" ) as f:
        for item in data:
            f.write(item + """\n""" )
    return path


# Fixture: second plain text file.
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase ) -> Optional[int]:
    snake_case__ : Tuple = ["""0""", """1""", """2""", """3"""]
    snake_case__ : List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
    with open(_lowerCAmelCase , """w""" ) as f:
        for item in data:
            f.write(item + """\n""" )
    return path


# Fixture: same data with an unknown .abc extension.
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase ) -> str:
    snake_case__ : Any = ["""0""", """1""", """2""", """3"""]
    snake_case__ : List[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
    with open(_lowerCAmelCase , """w""" ) as f:
        for item in data:
            f.write(item + """\n""" )
    return path


# Fixture: zip containing both text files.
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]:
    snake_case__ : Any = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
    with zipfile.ZipFile(_lowerCAmelCase , """w""" ) as f:
        f.write(_lowerCAmelCase , arcname=os.path.basename(_lowerCAmelCase ) )
        f.write(_lowerCAmelCase , arcname=os.path.basename(_lowerCAmelCase ) )
    return path


# Fixture: zip with the text files nested under main_dir/.
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
    snake_case__ : int = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
    with zipfile.ZipFile(_lowerCAmelCase , """w""" ) as f:
        f.write(_lowerCAmelCase , arcname=os.path.join("""main_dir""" , os.path.basename(_lowerCAmelCase ) ) )
        f.write(_lowerCAmelCase , arcname=os.path.join("""main_dir""" , os.path.basename(_lowerCAmelCase ) ) )
    return path


# Fixture: zip whose members carry an unsupported extension.
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]:
    snake_case__ : List[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
    with zipfile.ZipFile(_lowerCAmelCase , """w""" ) as f:
        f.write(_lowerCAmelCase , arcname=os.path.basename("""unsupported.ext""" ) )
        f.write(_lowerCAmelCase , arcname=os.path.basename("""unsupported_2.ext""" ) )
    return path


# Fixture: text file whose content includes a U+2029 paragraph separator.
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase ) -> List[str]:
    snake_case__ : List[str] = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
    snake_case__ : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
    with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
        f.write(_lowerCAmelCase )
    return path


# Fixture: path of a checked-in sample JPEG.
@pytest.fixture(scope="""session""" )
def __snake_case( ) -> str:
    return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" )


# Fixture: path of a checked-in sample WAV.
@pytest.fixture(scope="""session""" )
def __snake_case( ) -> str:
    return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" )


# Fixture: zip containing the sample image twice (second renamed *2.jpg).
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Tuple:
    snake_case__ : Dict = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
    with zipfile.ZipFile(_lowerCAmelCase , """w""" ) as f:
        f.write(_lowerCAmelCase , arcname=os.path.basename(_lowerCAmelCase ) )
        f.write(_lowerCAmelCase , arcname=os.path.basename(_lowerCAmelCase ).replace(""".jpg""" , """2.jpg""" ) )
    return path


# Fixture: directory tree with visible and hidden files/dirs.
@pytest.fixture(scope="""session""" )
def __snake_case( _lowerCAmelCase ) -> List[Any]:
    snake_case__ : Dict = tmp_path_factory.mktemp("""data_dir""" )
    (data_dir / "subdir").mkdir()
    with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f:
        f.write("""foo\n""" * 10 )
    with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f:
        f.write("""bar\n""" * 10 )
    # hidden file
    with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f:
        f.write("""bar\n""" * 10 )
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f:
        f.write("""foo\n""" * 10 )
    with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f:
        f.write("""bar\n""" * 10 )
    return data_dir
35
"""Tests for `datasets.utils.filelock`.

NOTE(review): restored from machine-renamed residue — in the original text both
tests were named `__snake_case` (the second shadowed the first, and neither
matched pytest's `test_*` collection pattern), the parameter was renamed to
`_lowerCAmelCase` while the bodies still referenced `tmpdir` (a NameError), and
the argument of `pytest.raises(...)` was lost.  Names below are reconstructed
from the bodies; `Timeout` is the only exception imported, so it is what
`pytest.raises` must have received — TODO confirm against upstream.
"""
import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def test_filelock_times_out(tmpdir):
    """A second lock on the same file must raise Timeout after ~`timeout` seconds."""
    lock_a = FileLock(str(tmpdir / "foo.lock"))
    lock_b = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock_a.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock_b.acquire(timeout)
        # The failed acquire must have waited at least the requested timeout.
        assert time.time() - _start > timeout


def test_filelock_shortens_long_filenames(tmpdir):
    """Over-long lock-file names are shortened to fit OS filename limits (<= 255)."""
    filename = "a" * 1_000 + ".lock"
    lock_a = FileLock(str(tmpdir / filename))
    # The shortened name keeps the .lock suffix but drops the over-long stem.
    assert lock_a._lock_file.endswith(".lock")
    assert not lock_a._lock_file.endswith(filename)
    assert len(os.path.basename(lock_a._lock_file)) <= 255
    # A second lock built from the same over-long path maps to the same file.
    lock_b = FileLock(tmpdir / filename)
    with lock_a.acquire():
        with pytest.raises(Timeout):
            lock_b.acquire(0)
35
1
"""Lazy import structure for the XLM-RoBERTa model family (transformers-style `__init__`).

NOTE(review): machine-renaming residue — every assignment below rebinds the
same name `__a`, so only the last value survives, and the `_LazyModule(...)`
call at the bottom references `_import_structure`, which is never defined in
this file (NameError at import time).  Upstream, the first dict was presumably
named `_import_structure` and each `else:` branch extended it — TODO restore.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Base import structure: configuration is always importable.
__a = {
    "configuration_xlm_roberta": [
        "XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaConfig",
        "XLMRobertaOnnxConfig",
    ],
}

# Slow (sentencepiece-backed) tokenizer — only exported when available.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __a = ["XLMRobertaTokenizer"]

# Fast (tokenizers-backed) tokenizer.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __a = ["XLMRobertaTokenizerFast"]

# PyTorch model classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __a = [
        "XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaForCausalLM",
        "XLMRobertaForMaskedLM",
        "XLMRobertaForMultipleChoice",
        "XLMRobertaForQuestionAnswering",
        "XLMRobertaForSequenceClassification",
        "XLMRobertaForTokenClassification",
        "XLMRobertaModel",
        "XLMRobertaPreTrainedModel",
    ]

# TensorFlow model classes.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __a = [
        "TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMRobertaForCausalLM",
        "TFXLMRobertaForMaskedLM",
        "TFXLMRobertaForMultipleChoice",
        "TFXLMRobertaForQuestionAnswering",
        "TFXLMRobertaForSequenceClassification",
        "TFXLMRobertaForTokenClassification",
        "TFXLMRobertaModel",
        "TFXLMRobertaPreTrainedModel",
    ]

# Flax model classes.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __a = [
        "FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxXLMRobertaForMaskedLM",
        "FlaxXLMRobertaForCausalLM",
        "FlaxXLMRobertaForMultipleChoice",
        "FlaxXLMRobertaForQuestionAnswering",
        "FlaxXLMRobertaForSequenceClassification",
        "FlaxXLMRobertaForTokenClassification",
        "FlaxXLMRobertaModel",
        "FlaxXLMRobertaPreTrainedModel",
    ]

# At type-checking time, perform the real imports so static analyzers see them.
if TYPE_CHECKING:
    from .configuration_xlm_roberta import (
        XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaConfig,
        XLMRobertaOnnxConfig,
    )

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlm_roberta import XLMRobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta import (
            XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaForCausalLM,
            XLMRobertaForMaskedLM,
            XLMRobertaForMultipleChoice,
            XLMRobertaForQuestionAnswering,
            XLMRobertaForSequenceClassification,
            XLMRobertaForTokenClassification,
            XLMRobertaModel,
            XLMRobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm_roberta import (
            TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMRobertaForCausalLM,
            TFXLMRobertaForMaskedLM,
            TFXLMRobertaForMultipleChoice,
            TFXLMRobertaForQuestionAnswering,
            TFXLMRobertaForSequenceClassification,
            TFXLMRobertaForTokenClassification,
            TFXLMRobertaModel,
            TFXLMRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xlm_roberta import (
            FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxXLMRobertaForCausalLM,
            FlaxXLMRobertaForMaskedLM,
            FlaxXLMRobertaForMultipleChoice,
            FlaxXLMRobertaForQuestionAnswering,
            FlaxXLMRobertaForSequenceClassification,
            FlaxXLMRobertaForTokenClassification,
            FlaxXLMRobertaModel,
            FlaxXLMRobertaPreTrainedModel,
        )

# At runtime, replace this module with a lazy proxy that imports on first access.
else:
    import sys

    __a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
35
"""Sum of an arithmetic (constant-difference) series via the closed-form formula."""


def sum_of_series(first_term: float, common_diff: float, num_of_terms: int) -> float:
    """Return the sum of the first `num_of_terms` terms of the arithmetic series
    starting at `first_term` with common difference `common_diff`.

    Uses the closed form S = n/2 * (2a + (n - 1)d), so it runs in O(1) and
    always returns a float.

    >>> sum_of_series(1, 1, 10)
    55.0

    NOTE(review): the original (machine-renamed) text declared all three
    parameters as `_lowerCAmelCase` — a SyntaxError — while the body already
    used `first_term`/`common_diff`/`num_of_terms` and ended with
    `return total`; names were restored from the body, and the demo below
    calls `sum_of_series`, which fixes the original name as well.
    """
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total


def main() -> None:
    """Demo entry point: print the sum of 1 + 2 + ... + 10."""
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
35
1
"""Lazy import structure for the MVP model (transformers-style `__init__`).

NOTE(review): machine-renaming residue — every assignment below rebinds the
same name `__a`, so only the last value survives, and the `_LazyModule(...)`
call at the bottom references `_import_structure`, which is never defined in
this file (NameError at import time).  Upstream, the first dict was presumably
`_import_structure` and each `else:` branch extended it — TODO restore.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Base import structure: configuration and slow tokenizer are always importable.
__a = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}

# Fast tokenizer — only exported when the `tokenizers` package is available.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __a = ["MvpTokenizerFast"]

# PyTorch model classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __a = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]

# At type-checking time, perform the real imports so static analyzers see them.
if TYPE_CHECKING:
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )

# At runtime, replace this module with a lazy proxy that imports on first access.
else:
    import sys

    __a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
35
"""Frozen sets of accepted call-argument names for diffusers pipeline tests.

Each pair of sets lists the full accepted parameters and the required ones for
a pipeline family (text-to-image, image-variation, inpainting, audio, ...).

NOTE(review): machine-renaming residue — every constant below is assigned to
the same name `__a`, so only the final frozenset survives at runtime; upstream
each set presumably had its own `*_PARAMS` style name — TODO restore.
"""
# Text-to-image: accepted / required params.
__a = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)
__a = frozenset(["prompt", "negative_prompt"])
# Text-to-image batch params (none).
__a = frozenset([])
__a = frozenset(["image"])
# Unconditional image variation: accepted / required params.
__a = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)
__a = frozenset(["image"])
# Text-guided image variation: accepted / required params.
__a = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)
__a = frozenset(["prompt", "image", "negative_prompt"])
__a = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)
__a = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
__a = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)
__a = frozenset(["image", "mask_image"])
# Example-guided inpainting: accepted / required params.
__a = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)
__a = frozenset(["example_image", "image", "mask_image"])
# Class-conditional generation.
__a = frozenset(["class_labels"])
__a = frozenset(["class_labels"])
# Unconditional generation.
__a = frozenset(["batch_size"])
__a = frozenset([])
__a = frozenset(["batch_size"])
__a = frozenset([])
# Text-to-audio: accepted / required params.
__a = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)
__a = frozenset(["prompt", "negative_prompt"])
# Token-conditioned generation.
__a = frozenset(["input_tokens"])
__a = frozenset(["input_tokens"])
35
1
"""Fill-mask pipeline module (transformers-style): predicts tokens for `[MASK]` positions.

NOTE(review): heavily damaged machine-renaming residue, transcribed as-is:
- every method is named `lowerCamelCase`, so later defs clobber earlier ones;
- several signatures repeat the parameter `snake_case_` (a SyntaxError);
- bodies assign to `snake_case__` but then read the original names
  (`input_ids`, `masked_index`, `model_outputs`, `targets`, `logger`, ...);
- the logger is bound to `__a` while code reads `logger`, and the decorator /
  base class use `_a`, presumably `PIPELINE_INIT_ARGS` / `Pipeline`.
Names must be restored before this can run — TODO confirm against upstream.
"""
from typing import Dict

import numpy as np

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException


if is_tf_available():
    import tensorflow as tf

    from ..tf_utils import stable_softmax

if is_torch_available():
    import torch


__a = logging.get_logger(__name__)


@add_end_docstrings(
    _a,  # NOTE(review): presumably PIPELINE_INIT_ARGS — `_a` is undefined here
    r"\n        top_k (`int`, defaults to 5):\n            The number of predictions to return.\n        targets (`str` or `List[str]`, *optional*):\n            When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n            token will be used (with a warning, and that might be slower).\n\n        ",
)
class UpperCAmelCase_(_a):  # NOTE(review): base is presumably Pipeline
    """Masked-language-modeling pipeline: fills the mask token(s) of an input text."""

    # Locate the position(s) of the mask token in `input_ids` for the active framework.
    def lowerCamelCase ( self : Union[str, Any] , snake_case_ : GenericTensor ):
        if self.framework == "tf":
            snake_case__ : Optional[Any] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
        elif self.framework == "pt":
            snake_case__ : Tuple = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=snake_case_ )
        else:
            raise ValueError("""Unsupported framework""" )
        return masked_index

    # Raise if the input contains no mask token at all.
    def lowerCamelCase ( self : Optional[Any] , snake_case_ : GenericTensor ):
        snake_case__ : List[Any] = self.get_masked_index(snake_case_ )
        snake_case__ : List[Any] = np.prod(masked_index.shape )
        if numel < 1:
            raise PipelineException(
                """fill-mask""",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    # Apply the single-input check to every element of a batch, or to one input.
    def lowerCamelCase ( self : Tuple , snake_case_ : GenericTensor ):
        if isinstance(snake_case_ , snake_case_ ):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["""input_ids"""][0] )
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(snake_case_ )

    # Preprocess: tokenize the text and validate that a mask token is present.
    def lowerCamelCase ( self : List[Any] , snake_case_ : Any , snake_case_ : Optional[int]=None , **snake_case_ : Optional[Any] ):  # NOTE(review): duplicate args = SyntaxError
        if return_tensors is None:
            snake_case__ : Tuple = self.framework
        snake_case__ : Optional[Any] = self.tokenizer(snake_case_ , return_tensors=snake_case_ )
        self.ensure_exactly_one_mask_token(snake_case_ )
        return model_inputs

    # Forward: run the model and carry the input_ids through to postprocessing.
    def lowerCamelCase ( self : str , snake_case_ : str ):
        snake_case__ : Union[str, Any] = self.model(**snake_case_ )
        snake_case__ : Dict = model_inputs["""input_ids"""]
        return model_outputs

    # Postprocess: softmax over the mask position(s), take top_k, decode sequences.
    def lowerCamelCase ( self : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : str=5 , snake_case_ : List[Any]=None ):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            snake_case__ : Any = target_ids.shape[0]
        snake_case__ : List[Any] = model_outputs["""input_ids"""][0]
        snake_case__ : Optional[Any] = model_outputs["""logits"""]
        if self.framework == "tf":
            snake_case__ : Optional[Any] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
            snake_case__ : Optional[int] = outputs.numpy()
            snake_case__ : Optional[int] = outputs[0, masked_index, :]
            snake_case__ : Optional[int] = stable_softmax(snake_case_ , axis=-1 )
            if target_ids is not None:
                snake_case__ : Optional[Any] = tf.gather_nd(tf.squeeze(snake_case_ , 0 ) , target_ids.reshape(-1 , 1 ) )
                snake_case__ : Optional[int] = tf.expand_dims(snake_case_ , 0 )
            snake_case__ : int = tf.math.top_k(snake_case_ , k=snake_case_ )
            snake_case__ , snake_case__ : Any = topk.values.numpy(), topk.indices.numpy()
        else:
            snake_case__ : List[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=snake_case_ ).squeeze(-1 )
            # Fill mask pipeline supports only one ${mask_token} per sample
            snake_case__ : Tuple = outputs[0, masked_index, :]
            snake_case__ : Tuple = logits.softmax(dim=-1 )
            if target_ids is not None:
                snake_case__ : List[str] = probs[..., target_ids]
            snake_case__ , snake_case__ : List[str] = probs.topk(snake_case_ )
        snake_case__ : Tuple = []
        snake_case__ : List[str] = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
            snake_case__ : Union[str, Any] = []
            for v, p in zip(_values , _predictions ):
                # Copy is important since we're going to modify this array in place
                snake_case__ : Dict = input_ids.numpy().copy()
                if target_ids is not None:
                    snake_case__ : Any = target_ids[p].tolist()
                snake_case__ : Union[str, Any] = p
                # Filter padding out:
                snake_case__ : List[str] = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                snake_case__ : str = self.tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ )
                snake_case__ : Union[str, Any] = {"""score""": v, """token""": p, """token_str""": self.tokenizer.decode([p] ), """sequence""": sequence}
                row.append(snake_case_ )
            result.append(snake_case_ )
        if single_mask:
            return result[0]
        return result

    # Resolve user-supplied target words to vocabulary ids (tokenizing unknown ones).
    def lowerCamelCase ( self : int , snake_case_ : Any , snake_case_ : str=None ):
        if isinstance(snake_case_ , snake_case_ ):
            snake_case__ : Union[str, Any] = [targets]
        try:
            snake_case__ : Any = self.tokenizer.get_vocab()
        except Exception:
            snake_case__ : str = {}
        snake_case__ : List[Any] = []
        for target in targets:
            snake_case__ : List[str] = vocab.get(snake_case_ , snake_case_ )
            if id_ is None:
                snake_case__ : int = self.tokenizer(
                    snake_case_,
                    add_special_tokens=snake_case_,
                    return_attention_mask=snake_case_,
                    return_token_type_ids=snake_case_,
                    max_length=1,
                    truncation=snake_case_,
                )["""input_ids"""]
                if len(snake_case_ ) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        """We cannot replace it with anything meaningful, ignoring it"""
                    )
                    continue
                snake_case__ : Optional[Any] = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`."
                )
            target_ids.append(id_ )
        snake_case__ : Optional[Any] = list(set(snake_case_ ) )
        if len(snake_case_ ) == 0:
            raise ValueError("""At least one target must be provided when passed.""" )
        snake_case__ : Dict = np.array(snake_case_ )
        return target_ids

    # Split kwargs into preprocess / forward / postprocess parameter dicts.
    def lowerCamelCase ( self : Union[str, Any] , snake_case_ : Tuple=None , snake_case_ : Union[str, Any]=None ):
        snake_case__ : Union[str, Any] = {}
        if targets is not None:
            snake_case__ : List[str] = self.get_target_ids(snake_case_ , snake_case_ )
            snake_case__ : Union[str, Any] = target_ids
        if top_k is not None:
            snake_case__ : Optional[int] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                """fill-mask""" , self.model.base_model_prefix , """The tokenizer does not define a `mask_token`.""" )
        return {}, {}, postprocess_params

    # Entry point: delegate to Pipeline.__call__, unwrap single-element outputs.
    def __call__( self : List[str] , snake_case_ : Union[str, Any] , *snake_case_ : Tuple , **snake_case_ : List[Any] ):
        snake_case__ : Optional[int] = super().__call__(snake_case_ , **snake_case_ )
        if isinstance(snake_case_ , snake_case_ ) and len(snake_case_ ) == 1:
            return outputs[0]
        return outputs
35
'''simple docstring'''
import json
import os
import unittest

from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
    VOCAB_FILES_NAMES,
    GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class UpperCAmelCase_ ( _a , unittest.TestCase ):
    """Unit tests for GPTSanJapaneseTokenizer.

    NOTE(review): identifiers appear machine-mangled (`lowercase` assigned three
    times, every method named `lowerCamelCase`, locals written to `snake_case__`
    but read back under other names) -- compare with the upstream test file.
    """

    lowercase = GPTSanJapaneseTokenizer
    lowercase = False
    lowercase = {"do_clean_text": False, "add_prefix_space": False}

    def lowerCamelCase ( self : str ):
        # Build a tiny on-disk vocab + emoji table so the tokenizer can be
        # instantiated without downloading anything.
        super().setUp()
        # fmt: off
        snake_case__ : Optional[Any] = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
        # fmt: on
        snake_case__ : int = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}}  # 😀
        snake_case__ : List[Any] = {"""unk_token""": """<unk>"""}
        snake_case__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        snake_case__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        with open(self.emoji_file , """w""" ) as emoji_writer:
            emoji_writer.write(json.dumps(snake_case_ ) )

    def lowerCamelCase ( self : Any , **snake_case_ : Union[str, Any] ):
        # Factory returning a tokenizer loaded from the temp fixture directory.
        kwargs.update(self.special_tokens_map )
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )

    def lowerCamelCase ( self : Any , snake_case_ : str ):
        # Input/expected-output pair: 㔺界 normalizes to 世界 on decode.
        snake_case__ : Union[str, Any] = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
        snake_case__ : List[str] = """こんにちは、世界。 \nこんばんは、世界。😀"""
        return input_text, output_text

    def lowerCamelCase ( self : Any , snake_case_ : Dict ):
        # Encode/decode round-trip helper built on get_input_output_texts.
        snake_case__ , snake_case__ : int = self.get_input_output_texts(snake_case_ )
        snake_case__ : int = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
        snake_case__ : List[str] = tokenizer.decode(snake_case_ , clean_up_tokenization_spaces=snake_case_ )
        return text, ids

    def lowerCamelCase ( self : Optional[Any] ):
        pass  # TODO add if relevant

    def lowerCamelCase ( self : Union[str, Any] ):
        pass  # TODO add if relevant

    def lowerCamelCase ( self : List[str] ):
        pass  # TODO add if relevant

    def lowerCamelCase ( self : Dict ):
        # Tokenization + token<->id conversion against the fixture vocab.
        snake_case__ : Optional[Any] = self.get_tokenizer()
        # Testing tokenization
        snake_case__ : int = """こんにちは、世界。 こんばんは、㔺界。"""
        snake_case__ : Optional[int] = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
        snake_case__ : Dict = tokenizer.tokenize(snake_case_ )
        self.assertListEqual(snake_case_ , snake_case_ )
        # Testing conversion to ids without special tokens
        snake_case__ : Union[str, Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        snake_case__ : List[Any] = tokenizer.convert_tokens_to_ids(snake_case_ )
        self.assertListEqual(snake_case_ , snake_case_ )
        # Testing conversion to ids with special tokens
        snake_case__ : Union[str, Any] = tokens + [tokenizer.unk_token]
        snake_case__ : Dict = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        snake_case__ : Any = tokenizer.convert_tokens_to_ids(snake_case_ )
        self.assertListEqual(snake_case_ , snake_case_ )

    def lowerCamelCase ( self : Optional[Any] ):
        # <|bagoftoken|> expands to repeats of the following token on decode.
        snake_case__ : Union[str, Any] = self.get_tokenizer()
        # Testing tokenization
        snake_case__ : Union[str, Any] = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
        snake_case__ : Optional[int] = """こんにちは、、、、世界。こんばんは、、、、世界。"""
        snake_case__ : Any = tokenizer.encode(snake_case_ )
        snake_case__ : int = tokenizer.decode(snake_case_ )
        self.assertEqual(snake_case_ , snake_case_ )

    @slow
    def lowerCamelCase ( self : Union[str, Any] ):
        # Prefix text must be equivalent whether concatenated or passed via
        # `prefix_text=` (uses the published checkpoint, hence @slow).
        snake_case__ : Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        # Testing tokenization
        snake_case__ : Tuple = """こんにちは、世界。"""
        snake_case__ : Optional[Any] = """こんばんは、㔺界。😀"""
        snake_case__ : List[str] = """こんにちは、世界。こんばんは、世界。😀"""
        snake_case__ : Dict = tokenizer.encode(prefix_text + input_text )
        snake_case__ : Dict = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
        snake_case__ : int = tokenizer.encode(snake_case_ , prefix_text=snake_case_ )
        snake_case__ : Optional[Any] = tokenizer.decode(snake_case_ )
        snake_case__ : Union[str, Any] = tokenizer.decode(snake_case_ )
        snake_case__ : str = tokenizer.decode(snake_case_ )
        self.assertEqual(snake_case_ , snake_case_ )
        self.assertEqual(snake_case_ , snake_case_ )
        self.assertEqual(snake_case_ , snake_case_ )

    @slow
    def lowerCamelCase ( self : Union[str, Any] ):
        # token_type_ids must mark prefix positions with 1 and text positions with 0.
        snake_case__ : Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        # Testing tokenization
        snake_case__ : Dict = """こんにちは、世界。"""
        snake_case__ : Optional[int] = """こんばんは、㔺界。😀"""
        snake_case__ : Any = len(tokenizer.encode(snake_case_ ) ) - 2
        snake_case__ : Optional[int] = len(tokenizer.encode(snake_case_ ) ) - 2
        snake_case__ : List[str] = [1] + [0] * (len_prefix + len_text + 1)
        snake_case__ : Optional[int] = [1] * (len_prefix + len_text + 1) + [0]
        snake_case__ : int = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        snake_case__ : Any = tokenizer(prefix_text + input_text ).token_type_ids
        snake_case__ : str = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
        snake_case__ : Optional[Any] = tokenizer(snake_case_ , prefix_text=snake_case_ ).token_type_ids
        self.assertListEqual(snake_case_ , snake_case_ )
        self.assertListEqual(snake_case_ , snake_case_ )
        self.assertListEqual(snake_case_ , snake_case_ )

    @slow
    def lowerCamelCase ( self : Optional[int] ):
        # The SEG token separates prefix from text; its position shifts with the split.
        snake_case__ : Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        snake_case__ : Union[str, Any] = tokenizer.encode("""あンいワ""" )
        snake_case__ : int = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
        snake_case__ : Dict = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
        self.assertEqual(tokenizer.decode(snake_case_ ) , tokenizer.decode(snake_case_ ) )
        self.assertEqual(tokenizer.decode(snake_case_ ) , tokenizer.decode(snake_case_ ) )
        self.assertNotEqual(snake_case_ , snake_case_ )
        self.assertNotEqual(snake_case_ , snake_case_ )
        self.assertEqual(x_token_a[1] , x_token_a[-1] )  # SEG token
        self.assertEqual(x_token_a[1] , x_token_a[3] )  # SEG token

    @slow
    def lowerCamelCase ( self : Any ):
        # Batched encoding: __call__ and batch_encode_plus must agree on
        # input_ids / token_type_ids / attention_mask, with padding.
        snake_case__ : Optional[int] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        snake_case__ : int = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
        snake_case__ : Optional[Any] = tokenizer(snake_case_ , padding=snake_case_ )
        snake_case__ : Tuple = tokenizer.batch_encode_plus(snake_case_ , padding=snake_case_ )
        # fmt: off
        snake_case__ : Optional[Any] = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]]
        snake_case__ : Optional[Any] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        snake_case__ : Optional[Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids , snake_case_ )
        self.assertListEqual(x_token.token_type_ids , snake_case_ )
        self.assertListEqual(x_token.attention_mask , snake_case_ )
        self.assertListEqual(x_token_a.input_ids , snake_case_ )
        self.assertListEqual(x_token_a.token_type_ids , snake_case_ )
        self.assertListEqual(x_token_a.attention_mask , snake_case_ )

    def lowerCamelCase ( self : Any ):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def lowerCamelCase ( self : List[str] ):
        # tokenizer has no padding token
        pass
35
1
'''simple docstring'''


def solution(limit: int = 1_000_000) -> int:
    """Project Euler 135: count n < limit with exactly ten solutions of
    x**2 - y**2 - z**2 = n where x > y > z > 0 are in arithmetic progression.

    Writing y for the middle term and d for the common difference gives
    n = y * (4*d - y), so every divisor y of n yields one candidate d.
    The positivity constraints z = y - d > 0 and n > 0 reduce to d < y < 4*d.

    >>> solution(1156)
    1
    """
    # frequency[n] = number of (y, d) solutions found for n.
    frequency = [0] * limit
    for y in range(1, limit):
        # Every n in this inner loop has y as a divisor.
        for n in range(y, limit, y):
            # 4*d = y + n // y must be an exact multiple of 4.
            quadruple_d = y + n // y  # exact: n is a multiple of y
            if quadruple_d % 4:
                continue
            d = quadruple_d // 4
            if d < y < 4 * d:  # z > 0 and n > 0
                frequency[n] += 1
    return sum(1 for count in frequency[1:limit] if count == 10)


if __name__ == "__main__":
    print(f"{solution() = }")
35
'''simple docstring'''
from transformers import BertTokenizerFast

from .custom_tokenization import CustomTokenizer


class UpperCAmelCase_ ( _a ):
    """Fast tokenizer whose slow counterpart is the project's CustomTokenizer.

    NOTE(review): the base `_a` is presumably BertTokenizerFast (imported
    above) and `lowercase` its `slow_tokenizer_class` attribute -- identifiers
    look machine-mangled; confirm against the upstream file.
    """

    lowercase = CustomTokenizer
    pass
35
1
'''simple docstring'''
import random
import unittest

import torch

from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class UpperCAmelCase_ ( _a , _a , unittest.TestCase ):
    """Pipeline tests for IFImgaImgSuperResolutionPipeline.

    NOTE(review): identifiers look machine-mangled (`lowercase` assigned four
    times, all methods named `lowerCamelCase`); the bases `_a , _a` are
    presumably PipelineTesterMixin and IFPipelineTesterMixin -- confirm.
    """

    lowercase = IFImgaImgSuperResolutionPipeline
    lowercase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    lowercase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"} )
    lowercase = PipelineTesterMixin.required_optional_params - {"latents"}

    def lowerCamelCase ( self : str ):
        # Dummy model components supplied by the IF tester mixin.
        return self._get_superresolution_dummy_components()

    def lowerCamelCase ( self : Any , snake_case_ : Optional[Any] , snake_case_ : List[str]=0 ):
        # Deterministic dummy inputs; mps needs a CPU-seeded generator.
        if str(snake_case_ ).startswith("""mps""" ):
            snake_case__ : List[str] = torch.manual_seed(snake_case_ )
        else:
            snake_case__ : Optional[Any] = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
        snake_case__ : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
        snake_case__ : List[str] = floats_tensor((1, 3, 16, 16) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
        snake_case__ : int = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """original_image""": original_image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs

    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() ,
        reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def lowerCamelCase ( self : Optional[Any] ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )

    def lowerCamelCase ( self : int ):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
    def lowerCamelCase ( self : Optional[int] ):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1E-1 )

    def lowerCamelCase ( self : List[Any] ):
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )

    def lowerCamelCase ( self : Dict ):
        self._test_save_load_local()

    def lowerCamelCase ( self : Union[str, Any] ):
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )
35
'''simple docstring'''
import numpy as np

from transformers import Pipeline


def softmax(outputs):
    """Numerically stable softmax over the last axis of `outputs`.

    Args:
        outputs: numpy array of logits.

    Returns:
        numpy array of the same shape with values summing to 1 along axis -1.
    """
    # keepdims=True keeps the reduced axis so broadcasting lines up.
    # (Was keepdims=outputs -- passing the array itself, which is invalid.)
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class UpperCAmelCase_(_a):
    """Sentence-pair classification pipeline.

    Implements the four Pipeline hooks (`_sanitize_parameters`, `preprocess`,
    `_forward`, `postprocess`); the original file defined all four under the
    same mangled name `lowerCamelCase`, so only the last survived.
    """

    def _sanitize_parameters(self, **kwargs):
        # Route the optional `second_text` argument to preprocess.
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        # Tokenize the (optionally paired) input into framework tensors.
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        # Pick the argmax class and report its label, probability and raw logits.
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)
        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]  # was mangled `idalabel`
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
35
1
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Maps submodule name -> public names it provides; consumed by _LazyModule so
# heavy dependencies (torch) are only imported on first attribute access.
# (The original bound this dict and all later updates to the same name `__a`,
# then referenced an undefined `_import_structure` at the bottom.)
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code needs torch, so only register it when torch is importable.
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
35
'''simple docstring'''


# Function to print upper half of diamond (pyramid)
def floyd(n):
    """Print the upper half of a star diamond with `n` rows."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """Print the lower half of the diamond (mirror of `floyd`)."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Print a full diamond of side `n`; complains on non-positive input.

    The original file named all three functions `__snake_case`, so the calls
    to `floyd`/`reverse_floyd` below (and `pretty_print` from __main__) raised
    NameError; the call-site names are restored here.
    """
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- | |- |--| |\ /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
35
1
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Maps submodule name -> public names it provides, consumed by _LazyModule.
# (The original rebound `__a` three times -- losing the dict -- and then
# referenced an undefined `_import_structure` at the bottom.)
_import_structure = {
    "configuration_clap": [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapAudioConfig",
        "ClapConfig",
        "ClapTextConfig",
    ],
    "processing_clap": ["ClapProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling + feature extraction need torch; register only when available.
    _import_structure["modeling_clap"] = [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapModel",
        "ClapPreTrainedModel",
        "ClapTextModel",
        "ClapTextModelWithProjection",
        "ClapAudioModel",
        "ClapAudioModelWithProjection",
    ]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]

if TYPE_CHECKING:
    from .configuration_clap import (
        CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
        ClapAudioConfig,
        ClapConfig,
        ClapTextConfig,
    )
    from .processing_clap import ClapProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clap import ClapFeatureExtractor
        from .modeling_clap import (
            CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ClapAudioModel,
            ClapAudioModelWithProjection,
            ClapModel,
            ClapPreTrainedModel,
            ClapTextModel,
            ClapTextModelWithProjection,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
35
'''simple docstring'''


def solution(limit: int = 1_000) -> int:
    """Return the sum of all multiples of 3 or 5 strictly below `limit`
    (Project Euler problem 1).

    >>> solution(10)
    23
    """
    # range starts at 3 because smaller values cannot be multiples of 3 or 5.
    return sum(e for e in range(3, limit) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
35
1
'''simple docstring'''
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score

import datasets


# The three doc constants were all bound to `__a` while the code below reads
# _CITATION/_DESCRIPTION/_KWARGS_DESCRIPTION; the proper names are restored.
_CITATION = "\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n"

_DESCRIPTION = "\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n"

_KWARGS_DESCRIPTION = "\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for 'cvit-mkb-clsr' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"precision\": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'precision@10': 1.0}\n\n"


def simple_accuracy(preds, labels):
    """Fraction of exact matches between `preds` and `labels` (numpy arrays)."""
    return float((preds == labels).mean())


def acc_and_fa(preds, labels):
    """Return both accuracy and (binary) F1 score for `preds` vs `labels`."""
    acc = simple_accuracy(preds, labels)
    # The original imported/called the non-existent `fa_score`; sklearn's
    # function is f1_score.
    fa = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": fa,
    }


def precision_at_aa(en_sentvecs, in_sentvecs):
    """Precision@10 for cross-lingual sentence retrieval.

    For each English sentence vector, take the 10 nearest (cosine-distance)
    Indic sentence vectors; a hit is when the aligned index is among them.
    """
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)
    sim = cdist(en_sentvecs, in_sentvecs, """cosine""")
    actual = np.array(range(n))
    # Smaller cosine distance = more similar, so ascending argsort gives the
    # 10 nearest neighbours.
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCAmelCase_(datasets.Metric):
    """IndicGLUE metric: accuracy / F1 / precision@10 depending on config.

    The two hooks below were both mangled to `lowerCamelCase` (the second
    shadowing the first); the datasets.Metric API names `_info`/`_compute`
    are restored.
    """

    def _info(self):
        if self.config_name not in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "cvit-mkb-clsr",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
            "wiki-ner",
        ]:
            raise KeyError(
                """You should supply a configuration name selected in """
                """[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
                """\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
                """\"wiki-ner\"]""")
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    # cvit-mkb-clsr works on float vectors; everything else on int labels.
                    """predictions""": datasets.Value("""int64""")
                    if self.config_name != """cvit-mkb-clsr"""
                    else datasets.Sequence(datasets.Value("""float32""")),
                    """references""": datasets.Value("""int64""")
                    if self.config_name != """cvit-mkb-clsr"""
                    else datasets.Sequence(datasets.Value("""float32""")),
                }),
            codebase_urls=[],
            reference_urls=[],
            format="""numpy""" if self.config_name != """cvit-mkb-clsr""" else None,
        )

    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_aa(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_fa(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                """You should supply a configuration name selected in """
                """[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
                """\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
                """\"wiki-ner\"]""")
35
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Maps submodule name -> public names it provides, consumed by _LazyModule.
# (The original rebound `__a` repeatedly and then referenced an undefined
# `_import_structure` at the bottom.)
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code needs torch, so only register it when torch is importable.
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
35
1
'''simple docstring'''
from __future__ import annotations


def __snake_case(nums) -> float:
    """Return the arithmetic mean of a non-empty sequence of numbers.

    Args:
        nums: sequence of numbers (must be non-empty).

    Raises:
        ValueError: If `nums` is empty.
    """
    # The original body read `nums` while the parameter was named
    # `_lowerCAmelCase`, so any call raised NameError; the parameter name is
    # aligned with the body here.
    if not nums:
        raise ValueError("""List is empty""")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
35
'''simple docstring'''
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """Return a copy of `img` with every band value shifted by `level`.

    Args:
        img: source PIL image.
        level: brightness offset; must lie in [-255.0, 255.0].

    Raises:
        ValueError: if `level` is outside [-255.0, 255.0].
    """

    def brightness(c: int) -> float:
        # Per-band mapping applied by Image.point; 128 + level + (c - 128)
        # simplifies to c + level but keeps the original centered form.
        # (The original read an undefined `c` and then called img.point(img).)
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
35
1
'''simple docstring'''
from __future__ import annotations


class Node:
    """A binary-tree node holding an integer payload and two child links.

    (The original class was mangled to `UpperCAmelCase_` while the annotations
    and `main` reference `Node`, and `__init__` bound locals instead of
    instance attributes.)
    """

    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    """Print the tree's values via in-order traversal, one per line."""
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    """Return the number of nodes on the longest root-to-leaf path (0 if empty)."""
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    """Return True when every node has either zero or two children."""
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    # Demo tree with nine nodes (exact linkage reconstructed -- the original
    # assignments were mangled to bare locals).
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("""Tree is: """)
    display(tree)


if __name__ == "__main__":
    main()
35
'''simple docstring'''
import argparse
import os
import re


# NOTE(review): identifiers in this file appear machine-mangled -- every
# constant is bound to `__a` and every function to `__snake_case`, while the
# bodies read the original names (`_re_indent`, `get_indent`, `sort_objects`,
# ...); confirm against the upstream transformers custom-init sorting script.
__a = "src/transformers"

# Pattern that looks at the indentation in a line.
__a = re.compile(R"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
__a = re.compile(R"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
__a = re.compile(R"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
__a = re.compile(R"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
__a = re.compile(R"\[([^\]]+)\]")


def __snake_case( _lowerCAmelCase ) -> List[Any]:
    # Return the leading whitespace of a line ("" when the line is blank).
    snake_case__ : int = _re_indent.search(_lowerCAmelCase )
    return "" if search is None else search.groups()[0]


def __snake_case( _lowerCAmelCase , _lowerCAmelCase="" , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> List[str]:
    # Split `code` into blocks of the given indentation level, optionally
    # bounded by start_prompt/end_prompt marker lines.
    snake_case__ : str = 0
    snake_case__ : Union[str, Any] = code.split("""\n""" )
    if start_prompt is not None:
        while not lines[index].startswith(_lowerCAmelCase ):
            index += 1
        snake_case__ : Tuple = ["""\n""".join(lines[:index] )]
    else:
        snake_case__ : List[str] = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    snake_case__ : Optional[int] = [lines[index]]
    index += 1
    while index < len(_lowerCAmelCase ) and (end_prompt is None or not lines[index].startswith(_lowerCAmelCase )):
        if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
            if len(_lowerCAmelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
                current_block.append(lines[index] )
                blocks.append("""\n""".join(_lowerCAmelCase ) )
                if index < len(_lowerCAmelCase ) - 1:
                    snake_case__ : str = [lines[index + 1]]
                    index += 1
                else:
                    snake_case__ : int = []
            else:
                blocks.append("""\n""".join(_lowerCAmelCase ) )
                snake_case__ : Optional[Any] = [lines[index]]
        else:
            current_block.append(lines[index] )
        index += 1

    # Adds current block if it's nonempty.
    if len(_lowerCAmelCase ) > 0:
        blocks.append("""\n""".join(_lowerCAmelCase ) )

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(_lowerCAmelCase ):
        blocks.append("""\n""".join(lines[index:] ) )

    return blocks


def __snake_case( _lowerCAmelCase ) -> Tuple:
    # Wrap a key function so sorting ignores case and underscores.
    def _inner(_lowerCAmelCase ):
        return key(_lowerCAmelCase ).lower().replace("""_""" , """""" )

    return _inner


def __snake_case( _lowerCAmelCase , _lowerCAmelCase=None ) -> List[Any]:
    # Sort names isort-style: constants first, then classes, then functions.
    # If no key is provided, we use a noop.
    def noop(_lowerCAmelCase ):
        return x

    if key is None:
        snake_case__ : Optional[int] = noop
    # Constants are all uppercase, they go first.
    snake_case__ : Optional[int] = [obj for obj in objects if key(_lowerCAmelCase ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    snake_case__ : int = [obj for obj in objects if key(_lowerCAmelCase )[0].isupper() and not key(_lowerCAmelCase ).isupper()]
    # Functions begin with a lowercase, they go last.
    snake_case__ : str = [obj for obj in objects if not key(_lowerCAmelCase )[0].isupper()]
    snake_case__ : List[str] = ignore_underscore(_lowerCAmelCase )
    return sorted(_lowerCAmelCase , key=_lowerCAmelCase ) + sorted(_lowerCAmelCase , key=_lowerCAmelCase ) + sorted(_lowerCAmelCase , key=_lowerCAmelCase )


def __snake_case( _lowerCAmelCase ) -> int:
    # Sort the object names inside a single `"key": [ ... ]` import statement.
    # This inner function sort imports between [ ].
    def _replace(_lowerCAmelCase ):
        snake_case__ : Union[str, Any] = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        snake_case__ : int = [part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1] ) == 0:
            snake_case__ : List[str] = keys[:-1]
        return "[" + ", ".join([f"\"{k}\"" for k in sort_objects(_lowerCAmelCase )] ) + "]"

    snake_case__ : str = import_statement.split("""\n""" )
    if len(_lowerCAmelCase ) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        snake_case__ : Dict = 2 if lines[1].strip() == """[""" else 1
        snake_case__ : str = [(i, _re_strip_line.search(_lowerCAmelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        snake_case__ : str = sort_objects(_lowerCAmelCase , key=lambda _lowerCAmelCase : x[1] )
        snake_case__ : Union[str, Any] = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
    elif len(_lowerCAmelCase ) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1] ) is not None:
            snake_case__ : Union[str, Any] = _re_bracket_content.sub(_replace , lines[1] )
        else:
            snake_case__ : List[Any] = [part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1] ) == 0:
                snake_case__ : List[str] = keys[:-1]
            snake_case__ : int = get_indent(lines[1] ) + """, """.join([f"\"{k}\"" for k in sort_objects(_lowerCAmelCase )] )
        return "\n".join(_lowerCAmelCase )
    else:
        # Finally we have to deal with imports fitting on one line
        snake_case__ : Optional[Any] = _re_bracket_content.sub(_replace , _lowerCAmelCase )
        return import_statement


def __snake_case( _lowerCAmelCase , _lowerCAmelCase=True ) -> Dict:
    # Sort the `_import_structure` sections of an __init__.py; with
    # check_only=True only report whether the file would change.
    # NOTE(review): this function is truncated in this view -- it continues
    # beyond the end of this chunk.
    with open(_lowerCAmelCase , encoding="""utf-8""" ) as f:
        snake_case__ : Optional[int] = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    snake_case__ : Optional[int] = split_code_in_indented_blocks(
        _lowerCAmelCase , start_prompt="""_import_structure = {""" , end_prompt="""if TYPE_CHECKING:""" )

    # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1 , len(_lowerCAmelCase ) - 1 ):
        # Check if the block contains some `_import_structure`s thingy to sort.
        snake_case__ : Optional[Any] = main_blocks[block_idx]
        snake_case__ : Dict = block.split("""\n""" )

        # Get to the start of the imports.
        snake_case__ : Dict = 0
        while line_idx < len(_lowerCAmelCase ) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                snake_case__ : Union[str, Any] = len(_lowerCAmelCase )
            else:
                line_idx += 1
        if line_idx >= len(_lowerCAmelCase ):
            continue

        # Ignore beginning and last line: they don't contain anything.
        snake_case__ : List[str] = """\n""".join(block_lines[line_idx:-1] )
        snake_case__ : str = get_indent(block_lines[1] )
        # Slit the internal block into blocks of indent level 1.
        snake_case__ : Optional[int] = split_code_in_indented_blocks(_lowerCAmelCase , indent_level=_lowerCAmelCase )
        # We have two categories of import key: list or _import_structure[key].append/extend
        snake_case__ : Tuple = _re_direct_key if """_import_structure = {""" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        snake_case__ : Optional[Any] = [(pattern.search(_lowerCAmelCase ).groups()[0] if pattern.search(_lowerCAmelCase ) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        snake_case__ : Dict = [(i, key) for i, key in enumerate(_lowerCAmelCase ) if key is not None]
        snake_case__ : Union[str, Any] = [x[0] for x in sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x[1] )]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        snake_case__ : List[Any] = 0
        snake_case__ : Optional[Any] = []
        for i in range(len(_lowerCAmelCase ) ):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i] )
            else:
                snake_case__ : Optional[Any] = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
                reorderded_blocks.append(_lowerCAmelCase )
                count += 1

        # And we put our main block back together with its first and last line.
        snake_case__ : Dict = """\n""".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )

    if code != "\n".join(_lowerCAmelCase ):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}."
) with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f: f.write("""\n""".join(_lowerCAmelCase ) ) def __snake_case( _lowerCAmelCase=True ) -> Tuple: snake_case__ : str = [] for root, _, files in os.walk(_lowerCAmelCase ): if "__init__.py" in files: snake_case__ : Union[str, Any] = sort_imports(os.path.join(_lowerCAmelCase , """__init__.py""" ) , check_only=_lowerCAmelCase ) if result: snake_case__ : Union[str, Any] = [os.path.join(_lowerCAmelCase , """__init__.py""" )] if len(_lowerCAmelCase ) > 0: raise ValueError(f"Would overwrite {len(_lowerCAmelCase )} files, run `make style`." ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.") __a = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
35
1
"""Convert GLPN checkpoints from the original repository to HuggingFace format.

Original repository: https://github.com/vinvino02/GLPDepth
"""

import argparse
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from PIL import Image

from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def rename_keys(state_dict):
    """Map original GLPN state-dict keys onto the HuggingFace naming scheme.

    NOTE: rule order matters — e.g. "attn.q" and "attn.proj" must be rewritten
    before the generic "attn" rule, and "norm" before the layer-norm fixups.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value
    return new_state_dict


def read_in_k_v(state_dict, config):
    """Split each fused key/value ("kv") projection into separate key and value tensors."""
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]


def prepare_img():
    """Download the standard COCO image used to sanity-check the converted model."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    """Copy the original GLPN weights into a HF `GLPNForDepthEstimation`, verify, optionally push."""
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output against reference slices from the original implementation
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")
        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_path",
        default=None,
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    parser.add_argument(
        "--model_name",
        default="glpn-kitti",
        type=str,
        help="Name of the model in case you're pushing to the hub.",
    )
    args = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
35
"""Lazy-loading ``__init__`` for the TimeSformer model (standard transformers pattern)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Configuration is importable unconditionally.
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

# Modeling code needs torch; only register it when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
35
1
"""Build and measure an n-qubit entangled (GHZ-style) state on Qiskit's Aer simulator."""
import qiskit


def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    """Entangle ``qubits`` qubits and return measurement counts over 1000 shots.

    A Hadamard on qubit 0 followed by a CNOT chain puts the register into an
    equal superposition of |00...0> and |11...1>, so measuring any one qubit
    collapses all of them to the same value.
    """
    classical_bits = qubits

    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)

    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)

    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {quantum_entanglement(3)}")
35
"""Convert a fairseq Wav2Vec2 + Speech2Text2 seq2seq checkpoint into a HuggingFace
`SpeechEncoderDecoderModel` (encoder: wav2vec2, decoder: speech_to_text_2)."""

import argparse
import json
import os

import fairseq
import torch
from torch import nn

from transformers import (
    SpeechaTextaConfig,
    SpeechaTextaForCausalLM,
    SpeechaTextaTokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    WavaVecaConfig,
    WavaVecaFeatureExtractor,
    WavaVecaModel,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq -> HuggingFace parameter-name mapping ("*" is replaced by the encoder layer index).
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Assign ``value`` to the (possibly nested) attribute ``key`` of ``hf_pointer``."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    """Copy all fairseq encoder weights into ``hf_model``; return the enc->dec projection layer."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one feature-extractor conv/layer-norm weight, validating its shape."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def make_linear_from_emb(emb):
    """Build a bias-free Linear layer sharing the embedding matrix's weights."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    """Read a fairseq dict file and return a HF-style token->id vocabulary."""
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
    words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict


@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """Load the fairseq checkpoint, rebuild it as a HF SpeechEncoderDecoderModel and save it."""
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=16_000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)

    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-large-lv60",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/s2t-small-mustc-en-fr-st",
        type=str,
        help="Path to hf decoder s2t checkpoint config",
    )
    parser.add_argument("--vocab_size", default=1_0224, type=int, help="Vocab size of decoder")
    parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
35
1
'''simple docstring''' def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> int: snake_case__ : Union[str, Any] = """""" for i in table: res += inp[i - 1] return res def __snake_case( _lowerCAmelCase ) -> List[Any]: return data[1:] + data[0] def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Any: snake_case__ : Union[str, Any] = """""" for i in range(len(_lowerCAmelCase ) ): if a[i] == b[i]: res += "0" else: res += "1" return res def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]: snake_case__ : List[str] = int("""0b""" + data[0] + data[-1] , 2 ) snake_case__ : str = int("""0b""" + data[1:3] , 2 ) return bin(s[row][col] )[2:] def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: snake_case__ : Dict = message[:4] snake_case__ : Optional[Any] = message[4:] snake_case__ : int = apply_table(_lowerCAmelCase , _lowerCAmelCase ) snake_case__ : int = xor(_lowerCAmelCase , _lowerCAmelCase ) snake_case__ : Union[str, Any] = apply_sbox(_lowerCAmelCase , temp[:4] ) # noqa: E741 snake_case__ : Any = apply_sbox(_lowerCAmelCase , temp[4:] ) snake_case__ : Optional[int] = """0""" * (2 - len(_lowerCAmelCase )) + l # noqa: E741 snake_case__ : Optional[Any] = """0""" * (2 - len(_lowerCAmelCase )) + r snake_case__ : int = apply_table(l + r , _lowerCAmelCase ) snake_case__ : Union[str, Any] = xor(_lowerCAmelCase , _lowerCAmelCase ) return temp + right if __name__ == "__main__": __a = input("Enter 10 bit key: ") __a = input("Enter 8 bit message: ") __a = [6, 3, 7, 4, 8, 5, 10, 9] __a = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6] __a = [2, 4, 3, 1] __a = [2, 6, 3, 1, 4, 8, 5, 7] __a = [4, 1, 3, 5, 7, 2, 8, 6] __a = [4, 1, 2, 3, 2, 3, 4, 1] __a = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]] __a = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]] # key generation __a = apply_table(key, paa_table) __a = temp[:5] __a = temp[5:] __a = left_shift(left) __a = left_shift(right) __a = 
apply_table(left + right, pa_table) __a = left_shift(left) __a = left_shift(right) __a = left_shift(left) __a = left_shift(right) __a = apply_table(left + right, pa_table) # encryption __a = apply_table(message, IP) __a = function(expansion, sa, sa, keya, temp) __a = temp[4:] + temp[:4] __a = function(expansion, sa, sa, keya, temp) __a = apply_table(temp, IP_inv) print("Cipher text is:", CT) # decryption __a = apply_table(CT, IP) __a = function(expansion, sa, sa, keya, temp) __a = temp[4:] + temp[:4] __a = function(expansion, sa, sa, keya, temp) __a = apply_table(temp, IP_inv) print("Plain text after decypting is:", PT)
35
"""Utilities to introspect HuggingFace model test files.

Given a path like ``tests/models/bert/test_modeling_bert.py``, these helpers
import the module and extract its test classes, tester classes, and the model
classes they cover, plus mappings between the three.
"""
import importlib
import os
import sys

# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")


def get_module_path(test_file):
    """Turn ``tests/models/x/test_modeling_x.py`` into the dotted module path, validating the shape."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    """Import and return the test module object for ``test_file``."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Return all ``*ModelTester`` classes defined in the test module, sorted by name."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Return all test classes in the module that actually declare model classes."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Return the union of model classes covered by all test classes, sorted by name."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Instantiate ``test_class`` and return the class of its ``model_tester``, if any."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Return all test classes in ``test_file`` that cover ``model_class``, sorted by name."""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Return the tester classes behind the test classes covering ``model_class``."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Map each test class to its tester class (or None)."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Map each model class to the test classes covering it."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Map each model class to the tester classes covering it."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Recursively replace classes by their names so the structure is JSON-serializable."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
35
1
"""Project Euler problem 8: https://projecteuler.net/problem=8

Find the thirteen adjacent digits in the 1000-digit number below that have the
greatest product.
"""
from functools import reduce

# The 1000-digit number from the problem statement.
N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Return the greatest product of thirteen consecutive digits of ``n``."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
35
'''simple docstring''' import argparse import json import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification def __snake_case( _lowerCAmelCase ) -> List[Any]: snake_case__ : Dict = SwinConfig() snake_case__ : Optional[Any] = swin_name.split("""_""" ) snake_case__ : Any = name_split[1] snake_case__ : List[Any] = int(name_split[4] ) snake_case__ : int = int(name_split[3][-1] ) if model_size == "tiny": snake_case__ : List[Any] = 96 snake_case__ : int = (2, 2, 6, 2) snake_case__ : int = (3, 6, 12, 24) elif model_size == "small": snake_case__ : Union[str, Any] = 96 snake_case__ : Optional[Any] = (2, 2, 18, 2) snake_case__ : str = (3, 6, 12, 24) elif model_size == "base": snake_case__ : Dict = 128 snake_case__ : str = (2, 2, 18, 2) snake_case__ : Dict = (4, 8, 16, 32) else: snake_case__ : List[str] = 192 snake_case__ : str = (2, 2, 18, 2) snake_case__ : List[Any] = (6, 12, 24, 48) if "in22k" in swin_name: snake_case__ : str = 21_841 else: snake_case__ : List[str] = 1_000 snake_case__ : int = """huggingface/label-files""" snake_case__ : Any = """imagenet-1k-id2label.json""" snake_case__ : List[Any] = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) ) snake_case__ : Dict = {int(_lowerCAmelCase ): v for k, v in idalabel.items()} snake_case__ : Optional[int] = idalabel snake_case__ : List[Any] = {v: k for k, v in idalabel.items()} snake_case__ : List[Any] = img_size snake_case__ : Dict = num_classes snake_case__ : Dict = embed_dim snake_case__ : Optional[int] = depths snake_case__ : int = num_heads snake_case__ : Optional[int] = window_size return config def __snake_case( _lowerCAmelCase ) -> Dict: if "patch_embed.proj" in name: snake_case__ : List[str] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: snake_case__ : int = 
name.replace("""patch_embed.norm""" , """embeddings.norm""" ) if "layers" in name: snake_case__ : str = """encoder.""" + name if "attn.proj" in name: snake_case__ : List[str] = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: snake_case__ : Tuple = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: snake_case__ : List[str] = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: snake_case__ : Optional[Any] = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: snake_case__ : Union[str, Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: snake_case__ : Dict = name.replace("""mlp.fc2""" , """output.dense""" ) if name == "norm.weight": snake_case__ : Tuple = """layernorm.weight""" if name == "norm.bias": snake_case__ : Union[str, Any] = """layernorm.bias""" if "head" in name: snake_case__ : Optional[int] = name.replace("""head""" , """classifier""" ) else: snake_case__ : List[str] = """swin.""" + name return name def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: for key in orig_state_dict.copy().keys(): snake_case__ : Optional[int] = orig_state_dict.pop(_lowerCAmelCase ) if "mask" in key: continue elif "qkv" in key: snake_case__ : Dict = key.split(""".""" ) snake_case__ : Optional[int] = int(key_split[1] ) snake_case__ : Union[str, Any] = int(key_split[3] ) snake_case__ : List[Any] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: snake_case__ : Optional[Any] = val[:dim, :] snake_case__ : Tuple = val[ dim : dim * 2, : ] snake_case__ : Dict = val[-dim:, :] else: snake_case__ : Tuple = val[ :dim ] snake_case__ : int = val[ dim : dim * 2 ] snake_case__ : int = val[ -dim: ] else: snake_case__ : Union[str, Any] = val return orig_state_dict def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> int: snake_case__ : Optional[int] = timm.create_model(_lowerCAmelCase , 
pretrained=_lowerCAmelCase ) timm_model.eval() snake_case__ : Optional[int] = get_swin_config(_lowerCAmelCase ) snake_case__ : Optional[Any] = SwinForImageClassification(_lowerCAmelCase ) model.eval() snake_case__ : str = convert_state_dict(timm_model.state_dict() , _lowerCAmelCase ) model.load_state_dict(_lowerCAmelCase ) snake_case__ : int = """http://images.cocodataset.org/val2017/000000039769.jpg""" snake_case__ : Dict = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swin_name.replace("""_""" , """-""" ) ) ) snake_case__ : Dict = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw ) snake_case__ : Optional[int] = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ) snake_case__ : Optional[Any] = timm_model(inputs["""pixel_values"""] ) snake_case__ : str = model(**_lowerCAmelCase ).logits assert torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 ) print(f"Saving model {swin_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_lowerCAmelCase ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( "--swin_name", default="swin_tiny_patch4_window7_224", type=str, help="Name of the Swin timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) __a = parser.parse_args() convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
35
1
'''simple docstring'''
# Public entry point for the UniDiffuser pipeline subpackage.  When torch or
# transformers is missing, the real classes are replaced by dummy placeholders
# that raise a helpful error on first use — the import surface stays the same
# either way.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fallback: dummy objects with the same names as the real pipeline classes.
    from ...utils.dummy_torch_and_transformers_objects import (
        ImageTextPipelineOutput,
        UniDiffuserPipeline,
    )
else:
    # NOTE(review): "UTransformeraDModel" looks like a mangled
    # "UTransformer2DModel" — confirm against the modeling module.
    from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
    from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
35
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


__a = logging.get_logger(__name__)


class UpperCAmelCase_(_a):
    """Deprecated feature-extractor shim that forwards to the image processor.

    Kept only for backward compatibility: constructing it emits a
    ``FutureWarning`` and otherwise behaves exactly like the parent image
    processor.
    """

    def __init__(self, *args, **kwargs):
        # Fixes two defects in the original:
        #  - `*snake_case_, **snake_case_` reused one parameter name twice,
        #    which is a SyntaxError in Python;
        #  - the varargs tuple was passed as the `warnings.warn` category;
        #    a deprecation notice should use FutureWarning.
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
35
1
'''simple docstring''' import torch from transformers import AutoModel class UpperCAmelCase_ ( torch.nn.Module ): """simple docstring""" def __init__( self : Optional[Any] , snake_case_ : Optional[Any]="sayef/fsner-bert-base-uncased" ): super(snake_case_ , self ).__init__() snake_case__ : Any = AutoModel.from_pretrained(snake_case_ , return_dict=snake_case_ ) snake_case__ : List[Any] = torch.nn.CosineSimilarity(3 , 1E-0_8 ) snake_case__ : Optional[Any] = torch.nn.Softmax(dim=1 ) def lowerCamelCase ( self : Optional[int] , **snake_case_ : str ): return self.bert(**snake_case_ ).last_hidden_state def lowerCamelCase ( self : Any , snake_case_ : List[Any] ): return token_embeddings.sum(2 , keepdim=snake_case_ ) def lowerCamelCase ( self : Optional[Any] , snake_case_ : int , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any]=1 ): return self.softmax(T * self.cos(snake_case_ , snake_case_ ) ) def lowerCamelCase ( self : Any , snake_case_ : Optional[Any] , snake_case_ : int ): snake_case__ : List[Any] = W_supports["""sizes"""].tolist() snake_case__ : Any = W_supports["""start_token_id"""].item() snake_case__ : Union[str, Any] = W_supports["""end_token_id"""].item() del W_supports["sizes"] del W_supports["start_token_id"] del W_supports["end_token_id"] snake_case__ : Union[str, Any] = self.BERT(**snake_case_ ) snake_case__ : Optional[Any] = self.BERT(**snake_case_ ) snake_case__ : Dict = None snake_case__ : List[str] = None snake_case__ : List[str] = W_supports["""input_ids"""] == start_token_id snake_case__ : Dict = W_supports["""input_ids"""] == end_token_id for i, size in enumerate(snake_case_ ): if i == 0: snake_case__ : Optional[Any] = 0 else: snake_case__ : Any = support_sizes[i - 1] snake_case__ : List[Any] = S[s : s + size][start_token_masks[s : s + size]] snake_case__ : Tuple = S[s : s + size][end_token_masks[s : s + size]] snake_case__ : Tuple = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 ) snake_case__ : Tuple = torch.matmul(q[i] , s_end.T 
).sum(1 ).softmax(0 ) if p_starts is not None: snake_case__ : Optional[int] = torch.vstack((p_starts, p_start) ) snake_case__ : List[str] = torch.vstack((p_ends, p_end) ) else: snake_case__ : List[str] = p_start snake_case__ : Tuple = p_end return p_starts, p_ends
35
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union

from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings


__a = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class UpperCAmelCase_(_a):
    """simple docstring"""

    # NOTE(review): every field below is bound to the same mangled name
    # `lowercase`, so at class-definition time only the LAST assignment
    # survives.  Judging by the help strings these were originally five
    # distinct fields (sortish sampler flag, predict-with-generate flag,
    # generation max length, generation num beams, generation config) —
    # confirm against the upstream definition before relying on this class.
    lowercase = field(default=_a, metadata={"help": "Whether to use SortishSampler or not."})
    lowercase = field(
        default=_a, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    lowercase = field(
        default=_a,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    lowercase = field(
        default=_a,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    lowercase = field(
        default=_a,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def lowerCamelCase(self: List[str]):
        # Serialize the arguments, converting nested config objects to dicts.
        # NOTE(review): the result is bound to the mangled name `snake_case__`
        # but the loop below iterates and returns `d` — these must be the same
        # variable; as written `d` is undefined at runtime.  The isinstance
        # check is also mangled (`snake_case_` twice); it presumably tested
        # `isinstance(v, GenerationConfig)` — verify upstream.
        snake_case__ : int = super().to_dict()
        for k, v in d.items():
            if isinstance(snake_case_, snake_case_):
                snake_case__ : Optional[int] = v.to_dict()
        return d
35
1
'''simple docstring'''
# Configuration class for the GLPN depth-estimation model.
from ...configuration_utils import PretrainedConfig
from ...utils import logging


# NOTE(review): `__a` is assigned twice (logger, then the pretrained-config
# archive map) — the second binding clobbers the first.  These were almost
# certainly two distinct module globals before the name mangling.
__a = logging.get_logger(__name__)

__a = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class UpperCAmelCase_(_a):
    """simple docstring"""

    # model_type identifier used by the auto-config machinery.
    lowercase = "glpn"

    # NOTE(review): every constructor parameter below shares the single
    # mangled name `snake_case_`, which is a SyntaxError (duplicate argument
    # name).  The defaults suggest the original parameters were, in order:
    # num_channels, num_encoder_blocks, depths, sr_ratios, hidden_sizes,
    # patch_sizes, strides, num_attention_heads, mlp_ratios, hidden_act,
    # hidden_dropout_prob, attention_probs_dropout_prob, initializer_range,
    # drop_path_rate, layer_norm_eps, decoder_hidden_size, max_depth,
    # head_in_index — confirm against the upstream GLPNConfig.
    def __init__(
        self: Optional[Any],
        snake_case_: List[str] = 3,
        snake_case_: Dict = 4,
        snake_case_: List[Any] = [2, 2, 2, 2],
        snake_case_: int = [8, 4, 2, 1],
        snake_case_: List[str] = [32, 64, 160, 256],
        snake_case_: Tuple = [7, 3, 3, 3],
        snake_case_: List[Any] = [4, 2, 2, 2],
        snake_case_: Tuple = [1, 2, 5, 8],
        snake_case_: List[str] = [4, 4, 4, 4],
        snake_case_: Optional[int] = "gelu",
        snake_case_: Dict = 0.0,
        snake_case_: Union[str, Any] = 0.0,
        snake_case_: List[Any] = 0.02,
        snake_case_: Tuple = 0.1,
        snake_case_: Any = 1e-6,
        snake_case_: Dict = 64,
        snake_case_: Tuple = 10,
        snake_case_: List[Any] = -1,
        **snake_case_: Optional[Any],
    ):
        super().__init__(**snake_case_)
        # NOTE(review): each line below should read `self.<attr> = <attr>`;
        # the left-hand sides were mangled to a local `snake_case__`, and the
        # right-hand names are never bound as written.
        snake_case__ : Optional[Any] = num_channels
        snake_case__ : Dict = num_encoder_blocks
        snake_case__ : Tuple = depths
        snake_case__ : Union[str, Any] = sr_ratios
        snake_case__ : Tuple = hidden_sizes
        snake_case__ : Optional[Any] = patch_sizes
        snake_case__ : int = strides
        snake_case__ : List[Any] = mlp_ratios
        snake_case__ : Optional[int] = num_attention_heads
        snake_case__ : Dict = hidden_act
        snake_case__ : int = hidden_dropout_prob
        snake_case__ : Optional[Any] = attention_probs_dropout_prob
        snake_case__ : str = initializer_range
        snake_case__ : List[str] = drop_path_rate
        snake_case__ : int = layer_norm_eps
        snake_case__ : Tuple = decoder_hidden_size
        snake_case__ : List[Any] = max_depth
        snake_case__ : Dict = head_in_index
35
'''simple docstring''' import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() __a = logging.get_logger(__name__) def __snake_case( _lowerCAmelCase , _lowerCAmelCase=False ) -> str: snake_case__ : Union[str, Any] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias") ) rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias") ) rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias") ) # projection layer + position embeddings rename_keys.extend( [ ("""cls_token""", """deit.embeddings.cls_token"""), ("""dist_token""", """deit.embeddings.distillation_token"""), ("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""), ("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""), ("""pos_embed""", 
"""deit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ("""pre_logits.fc.weight""", """pooler.dense.weight"""), ("""pre_logits.fc.bias""", """pooler.dense.bias"""), ] ) # if just the base model, we should remove "deit" from all keys that start with "deit" snake_case__ : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ("""norm.weight""", """deit.layernorm.weight"""), ("""norm.bias""", """deit.layernorm.bias"""), ("""head.weight""", """cls_classifier.weight"""), ("""head.bias""", """cls_classifier.bias"""), ("""head_dist.weight""", """distillation_classifier.weight"""), ("""head_dist.bias""", """distillation_classifier.bias"""), ] ) return rename_keys def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> Union[str, Any]: for i in range(config.num_hidden_layers ): if base_model: snake_case__ : Tuple = """""" else: snake_case__ : Dict = """deit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) snake_case__ : Optional[Any] = state_dict.pop(f"blocks.{i}.attn.qkv.weight" ) snake_case__ : Tuple = state_dict.pop(f"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict snake_case__ : Any = in_proj_weight[ : config.hidden_size, : ] snake_case__ : Optional[int] = in_proj_bias[: config.hidden_size] snake_case__ : Any = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] snake_case__ : str = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] snake_case__ : List[str] = in_proj_weight[ -config.hidden_size :, : ] snake_case__ : Tuple = in_proj_bias[-config.hidden_size :] def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int: snake_case__ : str = dct.pop(_lowerCAmelCase 
) snake_case__ : Tuple = val def __snake_case( ) -> Tuple: snake_case__ : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" snake_case__ : Optional[int] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw ) return im @torch.no_grad() def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> str: snake_case__ : Optional[int] = DeiTConfig() # all deit models have fine-tuned heads snake_case__ : Union[str, Any] = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size snake_case__ : int = 1_000 snake_case__ : Any = """huggingface/label-files""" snake_case__ : Optional[Any] = """imagenet-1k-id2label.json""" snake_case__ : Tuple = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) ) snake_case__ : List[Any] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()} snake_case__ : List[Any] = idalabel snake_case__ : List[str] = {v: k for k, v in idalabel.items()} snake_case__ : Tuple = int(deit_name[-6:-4] ) snake_case__ : Optional[Any] = int(deit_name[-3:] ) # size of the architecture if deit_name[9:].startswith("""tiny""" ): snake_case__ : Tuple = 192 snake_case__ : Union[str, Any] = 768 snake_case__ : Tuple = 12 snake_case__ : Union[str, Any] = 3 elif deit_name[9:].startswith("""small""" ): snake_case__ : str = 384 snake_case__ : Any = 1_536 snake_case__ : str = 12 snake_case__ : int = 6 if deit_name[9:].startswith("""base""" ): pass elif deit_name[4:].startswith("""large""" ): snake_case__ : Union[str, Any] = 1_024 snake_case__ : Any = 4_096 snake_case__ : List[Any] = 24 snake_case__ : Tuple = 16 # load original model from timm snake_case__ : List[Any] = timm.create_model(_lowerCAmelCase , pretrained=_lowerCAmelCase ) timm_model.eval() # load state_dict of original model, remove and rename some keys snake_case__ : Optional[Any] = timm_model.state_dict() snake_case__ : Optional[int] = create_rename_keys(_lowerCAmelCase , _lowerCAmelCase ) for src, 
dest in rename_keys: rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # load HuggingFace model snake_case__ : Optional[Any] = DeiTForImageClassificationWithTeacher(_lowerCAmelCase ).eval() model.load_state_dict(_lowerCAmelCase ) # Check outputs on an image, prepared by DeiTImageProcessor snake_case__ : List[Any] = int( (256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 snake_case__ : Optional[Any] = DeiTImageProcessor(size=_lowerCAmelCase , crop_size=config.image_size ) snake_case__ : str = image_processor(images=prepare_img() , return_tensors="""pt""" ) snake_case__ : Optional[Any] = encoding["""pixel_values"""] snake_case__ : Tuple = model(_lowerCAmelCase ) snake_case__ : Optional[int] = timm_model(_lowerCAmelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_lowerCAmelCase , outputs.logits , atol=1e-3 ) Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase ) print(f"Saving model {deit_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_lowerCAmelCase ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( "--deit_name", default="vit_deit_base_distilled_patch16_224", type=str, help="Name of the DeiT timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) __a = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
35
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, is_vision_available, ) __a = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["ViTFeatureExtractor"] __a = ["ViTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ "VIT_PRETRAINED_MODEL_ARCHIVE_LIST", "ViTForImageClassification", "ViTForMaskedImageModeling", "ViTModel", "ViTPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ "TFViTForImageClassification", "TFViTModel", "TFViTPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ "FlaxViTForImageClassification", "FlaxViTModel", "FlaxViTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_vit import ViTFeatureExtractor from .image_processing_vit import ViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit import ( VIT_PRETRAINED_MODEL_ARCHIVE_LIST, ViTForImageClassification, ViTForMaskedImageModeling, ViTModel, ViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel try: if not 
is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel else: import sys __a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
35
'''simple docstring'''
# tf-idf helper functions.
#
# NOTE(review): all four functions below were mangled to the same name
# `__snake_case`, so only the last definition survives at module level, and
# several functions declare the parameter `_lowerCAmelCase` twice (a
# SyntaxError).  The bodies also read the original, now-unbound names
# (`document`, `term`, `corpus`, `n`, `df`, `tf`, `idf`).  `logaa` is
# presumably a mangled `log10` import — confirm before use.
import string
from math import logaa


def __snake_case(_lowerCAmelCase, _lowerCAmelCase) -> int:
    """Term frequency: count occurrences of *term* in *document* (case-insensitive)."""
    # Strip punctuation and newlines, then split on single spaces.
    snake_case__ : List[str] = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    snake_case__ : List[str] = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def __snake_case(_lowerCAmelCase, _lowerCAmelCase) -> tuple[int, int]:
    """Document frequency: (docs containing *term*, total docs) for a
    newline-separated *corpus*."""
    snake_case__ : Dict = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    snake_case__ : Any = corpus_without_punctuation.split("\n")
    snake_case__ : int = term.lower()
    return (len([doc for doc in docs if term in doc]), len(_lowerCAmelCase))


def __snake_case(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase=False) -> float:
    """Inverse document frequency, optionally smoothed as 1 + log10(n / (1 + df)).

    Raises ValueError when n == 0 and ZeroDivisionError when df == 0
    (unsmoothed only).
    """
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + logaa(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(logaa(n / df), 3)


def __snake_case(_lowerCAmelCase, _lowerCAmelCase) -> float:
    """tf-idf score: the product tf * idf, rounded to 3 decimal places."""
    return round(tf * idf, 3)
35
1
'''simple docstring'''
# Project Euler problem 22: total of all name scores in p022_names.txt.
import os


def __snake_case() -> Optional[Any]:
    """Sum each name's alphabetical score times its 1-based sorted position.

    NOTE(review): the body is name-mangled and not runnable as written —
    `_lowerCAmelCase` (presumably `__file__` in the dirname call, `names` in
    the enumerate, and `letter` in the ord call), `names`, `name_score`, and
    `total_score` are all read without ever being bound; the `snake_case__`
    assignments below were their original bindings.  The main guard also
    calls an undefined `solution`.  Confirm against the upstream solution
    before relying on this.
    """
    # Read the single line of comma-separated, quoted names next to this file.
    with open(os.path.dirname(_lowerCAmelCase) + "/p022_names.txt") as file:
        snake_case__ : int = str(file.readlines()[0])
    # Drop the quotes, split on commas, and sort alphabetically.
    snake_case__ : Tuple = names.replace("\"", "").split(",")
    names.sort()
    snake_case__ : Union[str, Any] = 0
    snake_case__ : List[str] = 0
    for i, name in enumerate(_lowerCAmelCase):
        for letter in name:
            # ord('A') - 64 == 1, so each uppercase letter scores its
            # 1-based position in the alphabet.
            name_score += ord(_lowerCAmelCase) - 64
        total_score += (i + 1) * name_score
        # Reset the per-name accumulator for the next iteration.
        snake_case__ : List[Any] = 0
    return total_score


if __name__ == "__main__":
    print(solution())
35
'''simple docstring''' from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCAmelCase_ : """simple docstring""" def __init__( self : int , snake_case_ : Tuple , snake_case_ : List[str]=3 , snake_case_ : Tuple=32 , snake_case_ : List[Any]=3 , snake_case_ : List[str]=10 , snake_case_ : List[str]=[10, 20, 30, 40] , snake_case_ : Tuple=[1, 1, 2, 1] , snake_case_ : Tuple=True , snake_case_ : str=True , snake_case_ : int="relu" , snake_case_ : List[Any]=3 , snake_case_ : str=None , ): snake_case__ : List[Any] = parent snake_case__ : List[Any] = batch_size snake_case__ : int = image_size snake_case__ : List[Any] = num_channels snake_case__ : Optional[Any] = embeddings_size snake_case__ : Optional[int] = hidden_sizes snake_case__ : Tuple = depths snake_case__ : Any = is_training snake_case__ : Optional[int] = use_labels snake_case__ : Optional[int] = hidden_act snake_case__ : Optional[int] = num_labels snake_case__ : int = scope snake_case__ : Tuple = len(snake_case_ ) def lowerCamelCase ( self : Any ): snake_case__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case__ : Union[str, Any] = None if self.use_labels: snake_case__ : Optional[Any] = ids_tensor([self.batch_size] , 
self.num_labels ) snake_case__ : List[str] = self.get_config() return config, pixel_values, labels def lowerCamelCase ( self : int ): return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def lowerCamelCase ( self : Tuple , snake_case_ : Tuple , snake_case_ : List[Any] , snake_case_ : Optional[int] ): snake_case__ : Optional[Any] = TFResNetModel(config=snake_case_ ) snake_case__ : int = model(snake_case_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCamelCase ( self : List[str] , snake_case_ : List[str] , snake_case_ : str , snake_case_ : Union[str, Any] ): snake_case__ : str = self.num_labels snake_case__ : Optional[int] = TFResNetForImageClassification(snake_case_ ) snake_case__ : Tuple = model(snake_case_ , labels=snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase ( self : Tuple ): snake_case__ : List[Any] = self.prepare_config_and_inputs() snake_case__ , snake_case__ , snake_case__ : str = config_and_inputs snake_case__ : int = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class UpperCAmelCase_ ( _a , _a , unittest.TestCase ): """simple docstring""" lowercase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () lowercase = ( {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification} if is_tf_available() else {} ) lowercase = False lowercase = False lowercase = False lowercase = False lowercase = False def lowerCamelCase ( self : Optional[int] ): snake_case__ : Tuple = TFResNetModelTester(self ) snake_case__ : List[str] = ConfigTester(self , config_class=snake_case_ , 
has_text_modality=snake_case_ ) def lowerCamelCase ( self : Dict ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase ( self : str ): return @unittest.skip(reason="""ResNet does not use inputs_embeds""" ) def lowerCamelCase ( self : int ): pass @unittest.skip(reason="""ResNet does not support input and output embeddings""" ) def lowerCamelCase ( self : List[Any] ): pass def lowerCamelCase ( self : List[Any] ): snake_case__ , snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : Dict = model_class(snake_case_ ) snake_case__ : Optional[Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case__ : Union[str, Any] = [*signature.parameters.keys()] snake_case__ : Optional[int] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , snake_case_ ) def lowerCamelCase ( self : Union[str, Any] ): snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case_ ) def lowerCamelCase ( self : List[str] ): def check_hidden_states_output(snake_case_ : Any , snake_case_ : Any , snake_case_ : List[str] ): snake_case__ : List[Any] = model_class(snake_case_ ) snake_case__ : Dict = model(**self._prepare_for_class(snake_case_ , snake_case_ ) ) snake_case__ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states snake_case__ : List[Any] = self.model_tester.num_stages self.assertEqual(len(snake_case_ ) , expected_num_stages + 1 ) # ResNet's feature maps are of 
shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) snake_case__ , snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ : List[Any] = ["""basic""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: snake_case__ : Dict = layer_type snake_case__ : Optional[int] = True check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case__ : List[Any] = True check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ) def lowerCamelCase ( self : Optional[Any] ): snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case_ ) @slow def lowerCamelCase ( self : Optional[Any] ): for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case__ : str = TFResNetModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) def __snake_case( ) -> Optional[int]: snake_case__ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" @cached_property def lowerCamelCase ( self : List[Any] ): return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCamelCase ( self : Optional[int] ): snake_case__ : List[str] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) snake_case__ : List[Any] = self.default_image_processor snake_case__ : List[Any] = prepare_img() snake_case__ : List[str] = image_processor(images=snake_case_ , return_tensors="""tf""" ) # forward pass snake_case__ : Optional[Any] = 
model(**snake_case_ ) # verify the logits snake_case__ : Union[str, Any] = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape , snake_case_ ) snake_case__ : List[str] = tf.constant([-11.1069, -9.7877, -8.3777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , snake_case_ , atol=1E-4 ) )
35
1
'''simple docstring''' import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() __a = logging.get_logger(__name__) __a = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } __a = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]: for attribute in key.split(""".""" ): snake_case__ : Dict = getattr(_lowerCAmelCase , _lowerCAmelCase ) if weight_type is not None: snake_case__ : List[Any] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape else: snake_case__ : Union[str, Any] = hf_pointer.shape assert hf_shape == value.shape, ( f"Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": snake_case__ : int = value elif weight_type == "weight_g": snake_case__ : List[str] = value elif weight_type == "weight_v": snake_case__ : List[str] = value elif weight_type == "bias": snake_case__ : Optional[Any] = value else: snake_case__ : str = value logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Any: snake_case__ : Union[str, Any] = [] snake_case__ : Dict = fairseq_model.state_dict() snake_case__ : List[Any] = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight snake_case__ : Optional[int] = None for name, value in fairseq_dict.items(): snake_case__ : List[Any] = False if "conv_layers" in name: load_conv_layer( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == """group""" , ) snake_case__ : Union[str, Any] = True elif name.split(""".""" )[0] == "proj": snake_case__ : Tuple = fairseq_model.proj snake_case__ : int = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: snake_case__ : Optional[Any] = True if "*" in mapped_key: snake_case__ : Optional[int] = name.split(_lowerCAmelCase )[0].split(""".""" )[-2] snake_case__ : Tuple = mapped_key.replace("""*""" , _lowerCAmelCase ) if "weight_g" in name: snake_case__ : str = """weight_g""" elif "weight_v" in name: snake_case__ : int = """weight_v""" elif "bias" in name: snake_case__ : Dict = """bias""" elif "weight" in name: snake_case__ : Union[str, Any] = """weight""" else: snake_case__ : Union[str, Any] = None set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) continue if not is_used: unused_weights.append(_lowerCAmelCase ) 
logger.warning(f"Unused weights: {unused_weights}" ) return proj_weight def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict: snake_case__ : int = full_name.split("""conv_layers.""" )[-1] snake_case__ : Dict = name.split(""".""" ) snake_case__ : Any = int(items[0] ) snake_case__ : Optional[Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) snake_case__ : int = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) snake_case__ : str = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) snake_case__ : Union[str, Any] = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) snake_case__ : int = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." 
) else: unused_weights.append(_lowerCAmelCase ) def __snake_case( _lowerCAmelCase ) -> List[str]: snake_case__ , snake_case__ : str = emb.weight.shape snake_case__ : List[str] = nn.Linear(_lowerCAmelCase , _lowerCAmelCase , bias=_lowerCAmelCase ) snake_case__ : List[str] = emb.weight.data return lin_layer def __snake_case( _lowerCAmelCase ) -> Optional[Any]: with open(_lowerCAmelCase , """r""" , encoding="""utf-8""" ) as f: snake_case__ : int = f.readlines() snake_case__ : List[Any] = [line.split(""" """ )[0] for line in lines] snake_case__ : Union[str, Any] = len(_lowerCAmelCase ) snake_case__ : Any = { """<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3, } vocab_dict.update(dict(zip(_lowerCAmelCase , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> int: snake_case__ : Optional[Any] = WavaVecaConfig.from_pretrained(_lowerCAmelCase ) snake_case__ : Optional[Any] = SpeechaTextaConfig.from_pretrained( _lowerCAmelCase , vocab_size=_lowerCAmelCase , decoder_layers=_lowerCAmelCase , do_stable_layer_norm=_lowerCAmelCase ) snake_case__ : Optional[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , ) snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) snake_case__ : Tuple = model[0].eval() # set weights for wav2vec2 encoder snake_case__ : Optional[Any] = WavaVecaModel(_lowerCAmelCase ) snake_case__ : Dict = recursively_load_weights_wavaveca(model.encoder , _lowerCAmelCase ) snake_case__ : Optional[Any] = SpeechaTextaForCausalLM(_lowerCAmelCase ) snake_case__ , snake_case__ : Tuple = 
hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_lowerCAmelCase ) # set output linear layer unexpected_keys.remove("""embed_out""" ) snake_case__ : Tuple = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}" ) logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" ) snake_case__ : List[Any] = SpeechEncoderDecoderModel(encoder=_lowerCAmelCase , decoder=_lowerCAmelCase ) snake_case__ : Tuple = False # add projection layer snake_case__ : Union[str, Any] = nn.Parameter(projection_layer.weight ) snake_case__ : int = nn.Parameter(projection_layer.bias ) snake_case__ : Tuple = create_vocab_dict(_lowerCAmelCase ) with open(os.path.join(_lowerCAmelCase , """vocab.json""" ) , """w""" ) as fp: json.dump(_lowerCAmelCase , _lowerCAmelCase ) snake_case__ : Tuple = SpeechaTextaTokenizer(os.path.join(_lowerCAmelCase , """vocab.json""" ) ) tokenizer.save_pretrained(_lowerCAmelCase ) snake_case__ : Optional[Any] = hf_wavavec.config.to_dict() snake_case__ : Tuple = tokenizer.pad_token_id snake_case__ : Optional[Any] = tokenizer.bos_token_id snake_case__ : int = tokenizer.eos_token_id snake_case__ : str = """speech_to_text_2""" snake_case__ : List[Any] = """wav2vec2""" snake_case__ : List[str] = SpeechEncoderDecoderConfig.from_dict(_lowerCAmelCase ) hf_wavavec.save_pretrained(_lowerCAmelCase ) feature_extractor.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument( 
"--encoder_config_path", default="facebook/wav2vec2-large-lv60", type=str, help="Path to hf encoder wav2vec2 checkpoint config", ) parser.add_argument( "--decoder_config_path", default="facebook/s2t-small-mustc-en-fr-st", type=str, help="Path to hf decoder s2t checkpoint config", ) parser.add_argument("--vocab_size", default=1_0224, type=int, help="Vocab size of decoder") parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers") __a = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
35
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


__a = logging.get_logger(__name__)

# NOTE(review): this second assignment clobbers the logger binding above; both
# module-level names were collapsed to ``__a`` upstream.  Kept as-is because
# nothing in this module reads either binding.
__a = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class UpperCAmelCase_(PretrainedConfig):
    """Configuration for a GLPN depth-estimation model.

    Stores the hyper-parameters of the hierarchical (SegFormer-style) encoder
    and the lightweight decoder.  Every constructor argument is stored
    verbatim as an instance attribute of the same name.
    """

    # NOTE(review): the original base class was the undefined name ``_a``;
    # ``PretrainedConfig`` is the only imported candidate and matches the
    # ``super().__init__(**kwargs)`` call below.
    lowercase = "glpn"  # model-type identifier

    def __init__(
        self,
        num_channels=3,                       # input image channels
        num_encoder_blocks=4,                 # number of encoder stages
        depths=[2, 2, 2, 2],                  # transformer layers per stage (read-only default)
        sr_ratios=[8, 4, 2, 1],               # attention spatial-reduction ratio per stage
        hidden_sizes=[32, 64, 160, 256],      # embedding dimension per stage
        patch_sizes=[7, 3, 3, 3],             # patch-embedding kernel size per stage
        strides=[4, 2, 2, 2],                 # patch-embedding stride per stage
        num_attention_heads=[1, 2, 5, 8],     # attention heads per stage
        mlp_ratios=[4, 4, 4, 4],              # feed-forward expansion factor per stage
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,                         # assumed to be the maximum predicted depth -- TODO confirm
        head_in_index=-1,                     # index of the encoder feature map fed to the head
        **kwargs,
    ):
        # NOTE(review): the original signature named all 18 parameters
        # identically (a SyntaxError); the names here are taken from the
        # attribute assignments in the body, and the defaults keep the
        # original positional values, so positional callers are unaffected.
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
35
1
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor


__a = logging.get_logger(__name__)


class UpperCAmelCase_(ChineseCLIPImageProcessor):
    """Deprecated alias for :class:`ChineseCLIPImageProcessor`.

    Kept for backward compatibility: construction emits a ``FutureWarning``
    and otherwise behaves exactly like the image processor it wraps.
    """

    # NOTE(review): the original base class was the undefined name ``_a``;
    # ``ChineseCLIPImageProcessor`` is the only imported candidate and matches
    # the deprecation message below.
    def __init__(self, *args, **kwargs):
        # The original signature reused one identifier for both *args and
        # **kwargs (a SyntaxError) and passed the argument tuple as the
        # warning category; ``FutureWarning`` is the conventional category
        # for deprecation shims.
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
35
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roformer import RoFormerTokenizer from .tokenization_utils import JiebaPreTokenizer __a = logging.get_logger(__name__) __a = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} __a = { "vocab_file": { "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt", "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt", "junnyu/roformer_chinese_char_small": ( "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt" ), "junnyu/roformer_chinese_char_base": ( "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt" ), "junnyu/roformer_small_discriminator": ( "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt" ), "junnyu/roformer_small_generator": ( "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt" ), } } __a = { "junnyu/roformer_chinese_small": 1536, "junnyu/roformer_chinese_base": 1536, "junnyu/roformer_chinese_char_small": 512, "junnyu/roformer_chinese_char_base": 512, "junnyu/roformer_small_discriminator": 128, "junnyu/roformer_small_generator": 128, } __a = { "junnyu/roformer_chinese_small": {"do_lower_case": True}, "junnyu/roformer_chinese_base": {"do_lower_case": True}, "junnyu/roformer_chinese_char_small": {"do_lower_case": True}, "junnyu/roformer_chinese_char_base": {"do_lower_case": True}, "junnyu/roformer_small_discriminator": {"do_lower_case": True}, "junnyu/roformer_small_generator": {"do_lower_case": True}, } class UpperCAmelCase_ ( _a ): """simple docstring""" lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = 
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = PRETRAINED_INIT_CONFIGURATION lowercase = RoFormerTokenizer def __init__( self : List[Any] , snake_case_ : List[str]=None , snake_case_ : Dict=None , snake_case_ : Any=True , snake_case_ : str="[UNK]" , snake_case_ : List[str]="[SEP]" , snake_case_ : Optional[Any]="[PAD]" , snake_case_ : Union[str, Any]="[CLS]" , snake_case_ : Union[str, Any]="[MASK]" , snake_case_ : List[Any]=True , snake_case_ : Optional[Any]=None , **snake_case_ : Tuple , ): super().__init__( snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , tokenize_chinese_chars=snake_case_ , strip_accents=snake_case_ , **snake_case_ , ) snake_case__ : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( pre_tok_state.get("""lowercase""" , snake_case_ ) != do_lower_case or pre_tok_state.get("""strip_accents""" , snake_case_ ) != strip_accents ): snake_case__ : str = getattr(snake_case_ , pre_tok_state.pop("""type""" ) ) snake_case__ : Optional[int] = do_lower_case snake_case__ : Union[str, Any] = strip_accents snake_case__ : Union[str, Any] = pre_tok_class(**snake_case_ ) snake_case__ : str = do_lower_case def __getstate__( self : int ): snake_case__ : List[Any] = self.__dict__.copy() snake_case__ : str = BertPreTokenizer() return state def __setstate__( self : Dict , snake_case_ : Dict ): snake_case__ : List[Any] = d snake_case__ : Union[str, Any] = self.__dict__["""_tokenizer"""].get_vocab() snake_case__ : List[Any] = PreTokenizer.custom(JiebaPreTokenizer(snake_case_ ) ) def lowerCamelCase ( self : str , snake_case_ : Optional[Any] , snake_case_ : List[str]=None ): snake_case__ : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowerCamelCase ( self : str , snake_case_ : List[int] , snake_case_ : 
Optional[List[int]] = None ): snake_case__ : int = [self.sep_token_id] snake_case__ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCamelCase ( self : Dict , snake_case_ : str , snake_case_ : Optional[str] = None ): snake_case__ : Union[str, Any] = self._tokenizer.model.save(snake_case_ , name=snake_case_ ) return tuple(snake_case_ ) def lowerCamelCase ( self : Dict , snake_case_ : List[str] , snake_case_ : Tuple=None , snake_case_ : List[str]=None , snake_case_ : Union[str, Any]=False , **snake_case_ : Tuple , ): snake_case__ : Optional[Any] = BertPreTokenizer() return super().save_pretrained(snake_case_ , snake_case_ , snake_case_ , snake_case_ , **snake_case_ )
35
1
'''simple docstring''' import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets __a = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n" __a = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n" __a = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. 
It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n" def __snake_case( _lowerCAmelCase ) -> List[str]: def remove_articles(_lowerCAmelCase ): snake_case__ : int = re.compile(r"""\b(a|an|the)\b""" , re.UNICODE ) return re.sub(_lowerCAmelCase , """ """ , _lowerCAmelCase ) def white_space_fix(_lowerCAmelCase ): return " ".join(text.split() ) def remove_punc(_lowerCAmelCase ): snake_case__ : Union[str, Any] = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_lowerCAmelCase ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_lowerCAmelCase ) ) ) ) def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Dict: return int(normalize_answer(_lowerCAmelCase ) == normalize_answer(_lowerCAmelCase ) ) def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Any: snake_case__ : Union[str, Any] = [any(compute_exact(_lowerCAmelCase , _lowerCAmelCase ) for ref in refs ) for pred, refs in zip(_lowerCAmelCase , _lowerCAmelCase )] return (sum(_lowerCAmelCase ) / len(_lowerCAmelCase )) * 100 def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Any: snake_case__ : Dict = [rgram for rgrams in 
rgramslist for rgram in rgrams] snake_case__ : Optional[int] = Counter(_lowerCAmelCase ) snake_case__ : Tuple = Counter(_lowerCAmelCase ) snake_case__ : Any = Counter() for sgram, scount in sgramcounter.items(): snake_case__ : str = scount * numref snake_case__ : Tuple = Counter(_lowerCAmelCase ) snake_case__ : str = Counter() for cgram, ccount in cgramcounter.items(): snake_case__ : Union[str, Any] = ccount * numref # KEEP snake_case__ : str = sgramcounter_rep & cgramcounter_rep snake_case__ : Any = keepgramcounter_rep & rgramcounter snake_case__ : List[str] = sgramcounter_rep & rgramcounter snake_case__ : Any = 0 snake_case__ : List[Any] = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. snake_case__ : int = 1 snake_case__ : List[str] = 1 if len(_lowerCAmelCase ) > 0: snake_case__ : Any = keeptmpscorea / len(_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: # Fix an alleged bug [2] in the keep score computation. 
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) snake_case__ : Dict = keeptmpscorea / sum(keepgramcounterall_rep.values() ) snake_case__ : Tuple = 0 if keepscore_precision > 0 or keepscore_recall > 0: snake_case__ : Dict = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION snake_case__ : Any = sgramcounter_rep - cgramcounter_rep snake_case__ : Any = delgramcounter_rep - rgramcounter snake_case__ : Optional[int] = sgramcounter_rep - rgramcounter snake_case__ : Tuple = 0 snake_case__ : Dict = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. snake_case__ : Any = 1 if len(_lowerCAmelCase ) > 0: snake_case__ : Tuple = deltmpscorea / len(_lowerCAmelCase ) # ADDITION snake_case__ : List[str] = set(_lowerCAmelCase ) - set(_lowerCAmelCase ) snake_case__ : Optional[Any] = set(_lowerCAmelCase ) & set(_lowerCAmelCase ) snake_case__ : Tuple = set(_lowerCAmelCase ) - set(_lowerCAmelCase ) snake_case__ : Any = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
snake_case__ : Optional[Any] = 1 snake_case__ : int = 1 if len(_lowerCAmelCase ) > 0: snake_case__ : Optional[Any] = addtmpscore / len(_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: snake_case__ : Union[str, Any] = addtmpscore / len(_lowerCAmelCase ) snake_case__ : Any = 0 if addscore_precision > 0 or addscore_recall > 0: snake_case__ : Dict = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: snake_case__ : Tuple = len(_lowerCAmelCase ) snake_case__ : Union[str, Any] = ssent.split(""" """ ) snake_case__ : Tuple = csent.split(""" """ ) snake_case__ : Dict = [] snake_case__ : int = [] snake_case__ : Union[str, Any] = [] snake_case__ : Dict = [] snake_case__ : Union[str, Any] = [] snake_case__ : Any = [] snake_case__ : Optional[int] = [] snake_case__ : Any = [] snake_case__ : Dict = [] snake_case__ : Tuple = [] for rsent in rsents: snake_case__ : Dict = rsent.split(""" """ ) snake_case__ : str = [] snake_case__ : int = [] snake_case__ : int = [] ragramslist.append(_lowerCAmelCase ) for i in range(0 , len(_lowerCAmelCase ) - 1 ): if i < len(_lowerCAmelCase ) - 1: snake_case__ : Dict = ragrams[i] + """ """ + ragrams[i + 1] ragrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 2: snake_case__ : str = ragrams[i] + """ """ + ragrams[i + 1] + """ """ + ragrams[i + 2] ragrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 3: snake_case__ : Union[str, Any] = ragrams[i] + """ """ + ragrams[i + 1] + """ """ + ragrams[i + 2] + """ """ + ragrams[i + 3] ragrams.append(_lowerCAmelCase ) ragramslist.append(_lowerCAmelCase ) ragramslist.append(_lowerCAmelCase ) ragramslist.append(_lowerCAmelCase ) for i in range(0 , len(_lowerCAmelCase ) - 1 ): if i < len(_lowerCAmelCase ) - 1: snake_case__ : str = sagrams[i] + """ """ + sagrams[i + 1] sagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) 
- 2: snake_case__ : Dict = sagrams[i] + """ """ + sagrams[i + 1] + """ """ + sagrams[i + 2] sagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 3: snake_case__ : List[str] = sagrams[i] + """ """ + sagrams[i + 1] + """ """ + sagrams[i + 2] + """ """ + sagrams[i + 3] sagrams.append(_lowerCAmelCase ) for i in range(0 , len(_lowerCAmelCase ) - 1 ): if i < len(_lowerCAmelCase ) - 1: snake_case__ : int = cagrams[i] + """ """ + cagrams[i + 1] cagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 2: snake_case__ : Optional[Any] = cagrams[i] + """ """ + cagrams[i + 1] + """ """ + cagrams[i + 2] cagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 3: snake_case__ : str = cagrams[i] + """ """ + cagrams[i + 1] + """ """ + cagrams[i + 2] + """ """ + cagrams[i + 3] cagrams.append(_lowerCAmelCase ) ((snake_case__) , (snake_case__) , (snake_case__)) : Any = SARIngram(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) ((snake_case__) , (snake_case__) , (snake_case__)) : Dict = SARIngram(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) ((snake_case__) , (snake_case__) , (snake_case__)) : Optional[Any] = SARIngram(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) ((snake_case__) , (snake_case__) , (snake_case__)) : Optional[Any] = SARIngram(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) snake_case__ : List[Any] = sum([keepascore, keepascore, keepascore, keepascore] ) / 4 snake_case__ : Optional[Any] = sum([delascore, delascore, delascore, delascore] ) / 4 snake_case__ : Dict = sum([addascore, addascore, addascore, addascore] ) / 4 snake_case__ : Any = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def __snake_case( _lowerCAmelCase , _lowerCAmelCase = True , _lowerCAmelCase = "13a" , _lowerCAmelCase = True ) -> Dict: # Normalization is requried for the ASSET dataset (one of the primary # datasets in sentence simplification) to allow using space 
# to split the sentence. Even though Wiki-Auto and TURK datasets, # do not require normalization, we do it for consistency. # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset. # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7 if lowercase: snake_case__ : int = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: snake_case__ : Union[str, Any] = sacrebleu.metrics.bleu._get_tokenizer(_lowerCAmelCase )()(_lowerCAmelCase ) else: snake_case__ : Tuple = sacrebleu.TOKENIZERS[tokenizer]()(_lowerCAmelCase ) elif tokenizer == "moses": snake_case__ : Any = sacremoses.MosesTokenizer().tokenize(_lowerCAmelCase , return_str=_lowerCAmelCase , escape=_lowerCAmelCase ) elif tokenizer == "penn": snake_case__ : Tuple = sacremoses.MosesTokenizer().penn_tokenize(_lowerCAmelCase , return_str=_lowerCAmelCase ) else: snake_case__ : Union[str, Any] = sentence if not return_str: snake_case__ : str = normalized_sent.split() return normalized_sent def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]: if not (len(_lowerCAmelCase ) == len(_lowerCAmelCase ) == len(_lowerCAmelCase )): raise ValueError("""Sources length must match predictions and references lengths.""" ) snake_case__ : Optional[Any] = 0 for src, pred, refs in zip(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): sari_score += SARIsent(normalize(_lowerCAmelCase ) , normalize(_lowerCAmelCase ) , [normalize(_lowerCAmelCase ) for sent in refs] ) snake_case__ : int = sari_score / len(_lowerCAmelCase ) return 100 * sari_score def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="exp" , _lowerCAmelCase=None , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=False , ) -> Any: snake_case__ : Optional[int] = len(references[0] ) if any(len(_lowerCAmelCase ) != references_per_prediction for refs in references ): raise 
ValueError("""Sacrebleu requires the same number of references for each prediction""" ) snake_case__ : Tuple = [[refs[i] for refs in references] for i in range(_lowerCAmelCase )] snake_case__ : List[str] = sacrebleu.corpus_bleu( _lowerCAmelCase , _lowerCAmelCase , smooth_method=_lowerCAmelCase , smooth_value=_lowerCAmelCase , force=_lowerCAmelCase , lowercase=_lowerCAmelCase , use_effective_order=_lowerCAmelCase , ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase_ ( datasets.Metric ): """simple docstring""" def lowerCamelCase ( self : List[Any] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ), } ) , codebase_urls=[ """https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py""", """https://github.com/cocoxu/simplification/blob/master/SARI.py""", """https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py""", """https://github.com/mjpost/sacreBLEU""", ] , reference_urls=[ """https://www.aclweb.org/anthology/Q16-1029.pdf""", """https://github.com/mjpost/sacreBLEU""", """https://en.wikipedia.org/wiki/BLEU""", """https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""", ] , ) def lowerCamelCase ( self : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : Dict , snake_case_ : str ): snake_case__ : List[str] = {} result.update({"""sari""": compute_sari(sources=snake_case_ , predictions=snake_case_ , references=snake_case_ )} ) result.update({"""sacrebleu""": compute_sacrebleu(predictions=snake_case_ , references=snake_case_ )} ) result.update({"""exact""": compute_em(predictions=snake_case_ , 
references=snake_case_ )} ) return result
35
'''simple docstring'''
import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def __snake_case(tmpdir):
    """Acquiring an already-held lock must raise ``Timeout``.

    NOTE(review): the original annotated the return as ``Optional[int]`` (an
    undefined name here, and the function returns nothing), named its
    parameter ``_lowerCAmelCase`` while the body used ``tmpdir``, and bound
    both locks to the same throwaway name; the two-lock structure below is
    what the body's logic requires.
    """
    locka = FileLock(str(tmpdir / "foo.lock"))
    lockb = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            # second handle on the same lock file must time out
            lockb.acquire(timeout)
        # the failed acquire must have waited at least the requested timeout
        assert time.time() - _start > timeout


def __snake_case(tmpdir):  # noqa: F811 -- the original module also reused this name
    """Over-long lock-file names are shortened to a valid (<= 255 char) basename."""
    filename = "a" * 1_000 + ".lock"
    locka = FileLock(str(tmpdir / filename))
    assert locka._lock_file.endswith(".lock")
    assert not locka._lock_file.endswith(filename)
    assert len(os.path.basename(locka._lock_file)) <= 255
    lockb = FileLock(tmpdir / filename)
    with locka.acquire():
        with pytest.raises(Timeout):
            # the shortened path must still collide with the held lock
            lockb.acquire(0)
35
1
'''simple docstring''' from __future__ import annotations from collections import Counter from random import random class UpperCAmelCase_ : """simple docstring""" def __init__( self : int ): snake_case__ : Dict = {} def lowerCamelCase ( self : Union[str, Any] , snake_case_ : str ): snake_case__ : Union[str, Any] = {} def lowerCamelCase ( self : Optional[int] , snake_case_ : str , snake_case_ : str , snake_case_ : float ): if nodea not in self.connections: self.add_node(snake_case_ ) if nodea not in self.connections: self.add_node(snake_case_ ) snake_case__ : Optional[Any] = probability def lowerCamelCase ( self : List[str] ): return list(self.connections ) def lowerCamelCase ( self : List[Any] , snake_case_ : str ): snake_case__ : Union[str, Any] = 0 snake_case__ : Optional[Any] = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> dict[str, int]: snake_case__ : List[Any] = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) snake_case__ : Union[str, Any] = Counter(graph.get_nodes() ) snake_case__ : Optional[int] = start for _ in range(_lowerCAmelCase ): snake_case__ : Union[str, Any] = graph.transition(_lowerCAmelCase ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
35
'''simple docstring''' def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> float: snake_case__ : str = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff) # formula for sum of series return total def __snake_case( ) -> List[str]: print(sum_of_series(1 , 1 , 10 ) ) if __name__ == "__main__": import doctest doctest.testmod()
35
1
'''simple docstring'''
import inspect
import os
import sys
import unittest

import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu


class UpperCAmelCase_ ( unittest.TestCase ):
    """Launches accelerate's bundled test script on TPU via ``xla_spawn``.

    NOTE(review): both methods below share the mangled name ``lowerCamelCase``,
    so the second definition shadows the first at class-creation time; the first
    was presumably ``setUp``.
    """

    def lowerCamelCase ( self : Any ):
        # Path of the installed accelerate.test_utils module.
        snake_case__ : Union[str, Any] = inspect.getfile(accelerate.test_utils )
        # NOTE(review): ``mod_file`` is undefined here -- the line above bound
        # ``snake_case__`` instead (mangled renaming); intent is clearly to build
        # <accelerate.test_utils dir>/scripts/test_script.py.
        snake_case__ : Optional[int] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
        # Directory containing this test class's own source file.
        snake_case__ : Tuple = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )

    @require_tpu
    def lowerCamelCase ( self : Tuple ):
        # Build the xla_spawn command line requesting 8 TPU cores.
        # NOTE(review): ``self.test_dir`` / ``self.test_file_path`` are never set by
        # the method above (it bound locals) -- presumably set on ``self`` originally.
        snake_case__ : Optional[Any] = f"\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n ".split()
        # NOTE(review): ``distributed_args`` / ``snake_case_`` do not match the
        # bindings above (mangled); intent: run the command under sys.executable.
        snake_case__ : str = [sys.executable] + distributed_args
        execute_subprocess_async(snake_case_ , env=os.environ.copy() )
35
'''simple docstring'''
# NOTE(review): every constant below is bound to the same mangled name ``__a``,
# so only the last assignment survives at runtime; the originals were clearly
# distinct pipeline-parameter constants. Comments label what each set enumerates.

# text-to-image: accepted per-call kwargs
__a = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)
# text-to-image: batchable kwargs
__a = frozenset(["prompt", "negative_prompt"])
# unconditional image generation: per-call kwargs (none)
__a = frozenset([])
# unconditional image variation: batchable kwargs
__a = frozenset(["image"])
# image variation: per-call kwargs
__a = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)
# image variation: batchable kwargs
__a = frozenset(["image"])
# text-guided image-to-image: per-call kwargs
__a = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)
# text-guided image-to-image: batchable kwargs
__a = frozenset(["prompt", "image", "negative_prompt"])
__a = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)
# text-guided inpainting: batchable kwargs
__a = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
__a = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)
# image inpainting: batchable kwargs
__a = frozenset(["image", "mask_image"])
# example-guided inpainting: per-call kwargs
__a = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)
# example-guided inpainting: batchable kwargs
__a = frozenset(["example_image", "image", "mask_image"])
# class-conditional generation: per-call / batchable kwargs
__a = frozenset(["class_labels"])
__a = frozenset(["class_labels"])
# unconditional generation: per-call / batchable kwargs
__a = frozenset(["batch_size"])
__a = frozenset([])
__a = frozenset(["batch_size"])
__a = frozenset([])
# text-to-audio: per-call kwargs
__a = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)
# text-to-audio: batchable kwargs
__a = frozenset(["prompt", "negative_prompt"])
# token-conditioned generation: per-call / batchable kwargs
__a = frozenset(["input_tokens"])
__a = frozenset(["input_tokens"])
35
1
'''simple docstring'''
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    """split_dict""" ,
    [
        SplitDict(),
        SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=1_337 , num_examples=42 , dataset_name="""my_dataset""" )} ),
        SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=1_337 , num_examples=42 )} ),
        SplitDict({"""train""": SplitInfo()} ),
    ] ,
)
def __snake_case( _lowerCAmelCase ) -> Dict:
    """Round-trip a SplitDict through its YAML-list form and check equality.

    NOTE(review): names used below (``split_dict``, ``reloaded``) do not match
    the mangled bindings (``_lowerCAmelCase`` / ``snake_case__``) -- the original
    clearly parametrized on ``split_dict``; confirm against the real source.
    """
    snake_case__ : List[Any] = split_dict._to_yaml_list()
    # YAML list has one entry per split
    assert len(_lowerCAmelCase ) == len(_lowerCAmelCase )
    snake_case__ : List[Any] = SplitDict._from_yaml_list(_lowerCAmelCase )
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        snake_case__ : Optional[int] = None
        # the split name of split_dict takes over the name of the split info object
        snake_case__ : List[Any] = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    """split_info""" , [SplitInfo(), SplitInfo(dataset_name=_lowerCAmelCase ), SplitInfo(dataset_name="""my_dataset""" )] )
def __snake_case( _lowerCAmelCase ) -> Dict:
    # NOTE(review): ``SplitInfo(dataset_name=_lowerCAmelCase)`` references an undefined
    # module-level name (mangled); the original presumably passed ``None``.
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
    snake_case__ : Any = asdict(SplitDict({"""train""": split_info} ) )
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
35
'''simple docstring''' import json import os import unittest from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import ( VOCAB_FILES_NAMES, GPTSanJapaneseTokenizer, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase_ ( _a , unittest.TestCase ): """simple docstring""" lowercase = GPTSanJapaneseTokenizer lowercase = False lowercase = {"do_clean_text": False, "add_prefix_space": False} def lowerCamelCase ( self : str ): super().setUp() # fmt: off snake_case__ : Optional[Any] = ["""ใ“ใ‚“""", """ใ“ใ‚“ใซ""", """ใซใกใฏ""", """ใฐใ‚“ใฏ""", """ไธ–็•Œ,ใ”บ็•Œ""", """ใ€""", """ใ€‚""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""] # fmt: on snake_case__ : int = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # ๐Ÿ˜€ snake_case__ : List[Any] = {"""unk_token""": """<unk>"""} snake_case__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) snake_case__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) with open(self.emoji_file , """w""" ) as emoji_writer: emoji_writer.write(json.dumps(snake_case_ ) ) def lowerCamelCase ( self : Any , **snake_case_ : Union[str, Any] ): kwargs.update(self.special_tokens_map ) return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **snake_case_ ) def lowerCamelCase ( self : Any , snake_case_ : str ): snake_case__ : Union[str, Any] = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ \nใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚๐Ÿ˜€""" snake_case__ : List[str] = 
"""ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ \nใ“ใ‚“ใฐใ‚“ใฏใ€ไธ–็•Œใ€‚๐Ÿ˜€""" return input_text, output_text def lowerCamelCase ( self : Any , snake_case_ : Dict ): snake_case__ , snake_case__ : int = self.get_input_output_texts(snake_case_ ) snake_case__ : int = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) snake_case__ : List[str] = tokenizer.decode(snake_case_ , clean_up_tokenization_spaces=snake_case_ ) return text, ids def lowerCamelCase ( self : Optional[Any] ): pass # TODO add if relevant def lowerCamelCase ( self : Union[str, Any] ): pass # TODO add if relevant def lowerCamelCase ( self : List[str] ): pass # TODO add if relevant def lowerCamelCase ( self : Dict ): snake_case__ : Optional[Any] = self.get_tokenizer() # Testing tokenization snake_case__ : int = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ใ€€ใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚""" snake_case__ : Optional[int] = ["""ใ“ใ‚“""", """ใซใกใฏ""", """ใ€""", """ไธ–็•Œ""", """ใ€‚""", """<SP>""", """ใ“ใ‚“""", """ใฐใ‚“ใฏ""", """ใ€""", """ใ”บ็•Œ""", """ใ€‚"""] snake_case__ : Dict = tokenizer.tokenize(snake_case_ ) self.assertListEqual(snake_case_ , snake_case_ ) # Testing conversion to ids without special tokens snake_case__ : Union[str, Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] snake_case__ : List[Any] = tokenizer.convert_tokens_to_ids(snake_case_ ) self.assertListEqual(snake_case_ , snake_case_ ) # Testing conversion to ids with special tokens snake_case__ : Union[str, Any] = tokens + [tokenizer.unk_token] snake_case__ : Dict = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19] snake_case__ : Any = tokenizer.convert_tokens_to_ids(snake_case_ ) self.assertListEqual(snake_case_ , snake_case_ ) def lowerCamelCase ( self : Optional[Any] ): snake_case__ : Union[str, Any] = self.get_tokenizer() # Testing tokenization snake_case__ : Union[str, Any] = """ใ“ใ‚“ใซใกใฏใ€<|bagoftoken|>ไธ–็•Œใ€‚ใ“ใ‚“ใฐใ‚“ใฏใ€<|bagoftoken|>ใ”บ็•Œใ€‚""" snake_case__ : Optional[int] = 
"""ใ“ใ‚“ใซใกใฏใ€ใ€ใ€ใ€ไธ–็•Œใ€‚ใ“ใ‚“ใฐใ‚“ใฏใ€ใ€ใ€ใ€ไธ–็•Œใ€‚""" snake_case__ : Any = tokenizer.encode(snake_case_ ) snake_case__ : int = tokenizer.decode(snake_case_ ) self.assertEqual(snake_case_ , snake_case_ ) @slow def lowerCamelCase ( self : Union[str, Any] ): snake_case__ : Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) # Testing tokenization snake_case__ : Tuple = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚""" snake_case__ : Optional[Any] = """ใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚๐Ÿ˜€""" snake_case__ : List[str] = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ใ“ใ‚“ใฐใ‚“ใฏใ€ไธ–็•Œใ€‚๐Ÿ˜€""" snake_case__ : Dict = tokenizer.encode(prefix_text + input_text ) snake_case__ : Dict = tokenizer.encode("""""" , prefix_text=prefix_text + input_text ) snake_case__ : int = tokenizer.encode(snake_case_ , prefix_text=snake_case_ ) snake_case__ : Optional[Any] = tokenizer.decode(snake_case_ ) snake_case__ : Union[str, Any] = tokenizer.decode(snake_case_ ) snake_case__ : str = tokenizer.decode(snake_case_ ) self.assertEqual(snake_case_ , snake_case_ ) self.assertEqual(snake_case_ , snake_case_ ) self.assertEqual(snake_case_ , snake_case_ ) @slow def lowerCamelCase ( self : Union[str, Any] ): snake_case__ : Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) # Testing tokenization snake_case__ : Dict = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚""" snake_case__ : Optional[int] = """ใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚๐Ÿ˜€""" snake_case__ : Any = len(tokenizer.encode(snake_case_ ) ) - 2 snake_case__ : Optional[int] = len(tokenizer.encode(snake_case_ ) ) - 2 snake_case__ : List[str] = [1] + [0] * (len_prefix + len_text + 1) snake_case__ : Optional[int] = [1] * (len_prefix + len_text + 1) + [0] snake_case__ : int = [1] + [1] * (len_prefix) + [0] * (len_text + 1) snake_case__ : Any = tokenizer(prefix_text + input_text ).token_type_ids snake_case__ : str = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids snake_case__ : 
Optional[Any] = tokenizer(snake_case_ , prefix_text=snake_case_ ).token_type_ids self.assertListEqual(snake_case_ , snake_case_ ) self.assertListEqual(snake_case_ , snake_case_ ) self.assertListEqual(snake_case_ , snake_case_ ) @slow def lowerCamelCase ( self : Optional[int] ): snake_case__ : Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) snake_case__ : Union[str, Any] = tokenizer.encode("""ใ‚ใƒณใ„ใƒฏ""" ) snake_case__ : int = tokenizer.encode("""""" , prefix_text="""ใ‚ใƒณใ„ใƒฏ""" ) snake_case__ : Dict = tokenizer.encode("""ใ„ใƒฏ""" , prefix_text="""ใ‚ใƒณ""" ) self.assertEqual(tokenizer.decode(snake_case_ ) , tokenizer.decode(snake_case_ ) ) self.assertEqual(tokenizer.decode(snake_case_ ) , tokenizer.decode(snake_case_ ) ) self.assertNotEqual(snake_case_ , snake_case_ ) self.assertNotEqual(snake_case_ , snake_case_ ) self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token @slow def lowerCamelCase ( self : Any ): snake_case__ : Optional[int] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) snake_case__ : int = [["""ๆญฆ็”ฐไฟก็Ž„""", """ใฏใ€"""], ["""็น”็”ฐไฟก้•ท""", """ใฎ้…ไธ‹ใฎใ€"""]] snake_case__ : Optional[Any] = tokenizer(snake_case_ , padding=snake_case_ ) snake_case__ : Tuple = tokenizer.batch_encode_plus(snake_case_ , padding=snake_case_ ) # fmt: off snake_case__ : Optional[Any] = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]] snake_case__ : Optional[Any] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]] snake_case__ : Optional[Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]] # fmt: on self.assertListEqual(x_token.input_ids , snake_case_ ) self.assertListEqual(x_token.token_type_ids , snake_case_ ) self.assertListEqual(x_token.attention_mask , snake_case_ ) self.assertListEqual(x_token_a.input_ids , snake_case_ ) 
self.assertListEqual(x_token_a.token_type_ids , snake_case_ ) self.assertListEqual(x_token_a.attention_mask , snake_case_ ) def lowerCamelCase ( self : Any ): # Intentionally convert some words to accommodate character fluctuations unique to Japanese pass def lowerCamelCase ( self : List[str] ): # tokenizer has no padding token pass
35
1
'''simple docstring''' import os import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers.models.realm.configuration_realm import RealmConfig from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer class UpperCAmelCase_ ( _a ): """simple docstring""" def lowerCamelCase ( self : List[Any] ): snake_case__ : str = tempfile.mkdtemp() snake_case__ : Tuple = 5 # Realm tok snake_case__ : Union[str, Any] = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """test""", """question""", """this""", """is""", """the""", """first""", """second""", """third""", """fourth""", """fifth""", """record""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] snake_case__ : Optional[int] = os.path.join(self.tmpdirname , """realm_tokenizer""" ) os.makedirs(snake_case_ , exist_ok=snake_case_ ) snake_case__ : Union[str, Any] = os.path.join(snake_case_ , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) snake_case__ : List[Any] = os.path.join(self.tmpdirname , """realm_block_records""" ) os.makedirs(snake_case_ , exist_ok=snake_case_ ) def lowerCamelCase ( self : List[str] ): return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , """realm_tokenizer""" ) ) def lowerCamelCase ( self : str ): shutil.rmtree(self.tmpdirname ) def lowerCamelCase ( self : int ): snake_case__ : List[Any] = RealmConfig(num_block_records=self.num_block_records ) return config def lowerCamelCase ( self : int ): snake_case__ : str = Dataset.from_dict( { """id""": ["""0""", """1"""], """question""": ["""foo""", """bar"""], """answers""": [["""Foo""", 
"""Bar"""], ["""Bar"""]], } ) return dataset def lowerCamelCase ( self : List[str] ): snake_case__ : str = np.array( [ b"""This is the first record""", b"""This is the second record""", b"""This is the third record""", b"""This is the fourth record""", b"""This is the fifth record""", b"""This is a longer longer longer record""", ] , dtype=snake_case_ , ) return block_records def lowerCamelCase ( self : int ): snake_case__ : Dict = RealmRetriever( block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , ) return retriever def lowerCamelCase ( self : int ): snake_case__ : Tuple = self.get_config() snake_case__ : Any = self.get_dummy_retriever() snake_case__ : str = retriever.tokenizer snake_case__ : Optional[int] = np.array([0, 3] , dtype="""long""" ) snake_case__ : List[Any] = tokenizer(["""Test question"""] ).input_ids snake_case__ : List[str] = tokenizer( ["""the fourth"""] , add_special_tokens=snake_case_ , return_token_type_ids=snake_case_ , return_attention_mask=snake_case_ , ).input_ids snake_case__ : List[Any] = config.reader_seq_len snake_case__ , snake_case__ , snake_case__ , snake_case__ : Optional[int] = retriever( snake_case_ , snake_case_ , answer_ids=snake_case_ , max_length=snake_case_ , return_tensors="""np""" ) self.assertEqual(len(snake_case_ ) , 2 ) self.assertEqual(len(snake_case_ ) , 2 ) self.assertEqual(len(snake_case_ ) , 2 ) self.assertEqual(concat_inputs.input_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) ) self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """first""", """record""", """[SEP]"""] , ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["""[CLS]""", """test""", """question""", """[SEP]""", 
"""this""", """is""", """the""", """fourth""", """record""", """[SEP]"""] , ) def lowerCamelCase ( self : Union[str, Any] ): snake_case__ : List[str] = self.get_config() snake_case__ : List[str] = self.get_dummy_retriever() snake_case__ : str = retriever.tokenizer snake_case__ : List[Any] = np.array([0, 3, 5] , dtype="""long""" ) snake_case__ : Union[str, Any] = tokenizer(["""Test question"""] ).input_ids snake_case__ : Optional[int] = tokenizer( ["""the fourth""", """longer longer"""] , add_special_tokens=snake_case_ , return_token_type_ids=snake_case_ , return_attention_mask=snake_case_ , ).input_ids snake_case__ : Any = config.reader_seq_len snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[str] = retriever( snake_case_ , snake_case_ , answer_ids=snake_case_ , max_length=snake_case_ , return_tensors="""np""" ) self.assertEqual([False, True, True] , snake_case_ ) self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , snake_case_ ) self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , snake_case_ ) def lowerCamelCase ( self : Any ): snake_case__ : List[str] = self.get_dummy_retriever() retriever.save_pretrained(os.path.join(self.tmpdirname , """realm_block_records""" ) ) # Test local path snake_case__ : Union[str, Any] = retriever.from_pretrained(os.path.join(self.tmpdirname , """realm_block_records""" ) ) self.assertEqual(retriever.block_records[0] , b"""This is the first record""" ) # Test mocked remote path with patch("""transformers.models.realm.retrieval_realm.hf_hub_download""" ) as mock_hf_hub_download: snake_case__ : Tuple = os.path.join( os.path.join(self.tmpdirname , """realm_block_records""" ) , _REALM_BLOCK_RECORDS_FILENAME ) snake_case__ : List[str] = RealmRetriever.from_pretrained("""google/realm-cc-news-pretrained-openqa""" ) self.assertEqual(retriever.block_records[0] , b"""This is the first record""" )
35
'''simple docstring'''
from transformers import BertTokenizerFast

from .custom_tokenization import CustomTokenizer


class UpperCAmelCase_ ( _a ):
    """Fast (Rust-backed) variant of the custom tokenizer.

    NOTE(review): the base class ``_a`` is not defined in this file (mangled
    renaming) -- given the import above it was presumably ``BertTokenizerFast``,
    and ``lowercase`` presumably ``slow_tokenizer_class``; confirm upstream.
    """

    # slow counterpart used when converting from the slow implementation
    lowercase = CustomTokenizer
    pass
35
1
'''simple docstring''' import warnings from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch from ...models import UNetaDModel from ...schedulers import RePaintScheduler from ...utils import PIL_INTERPOLATION, logging, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput __a = logging.get_logger(__name__) # pylint: disable=invalid-name def __snake_case( _lowerCAmelCase ) -> Dict: warnings.warn( """The preprocess method is deprecated and will be removed in a future version. Please""" """ use VaeImageProcessor.preprocess instead""" , _lowerCAmelCase , ) if isinstance(_lowerCAmelCase , torch.Tensor ): return image elif isinstance(_lowerCAmelCase , PIL.Image.Image ): snake_case__ : Optional[Any] = [image] if isinstance(image[0] , PIL.Image.Image ): snake_case__ , snake_case__ : Tuple = image[0].size snake_case__ , snake_case__ : List[Any] = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 snake_case__ : List[str] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image] snake_case__ : Optional[int] = np.concatenate(_lowerCAmelCase , axis=0 ) snake_case__ : Tuple = np.array(_lowerCAmelCase ).astype(np.floataa ) / 255.0 snake_case__ : Tuple = image.transpose(0 , 3 , 1 , 2 ) snake_case__ : Any = 2.0 * image - 1.0 snake_case__ : Optional[int] = torch.from_numpy(_lowerCAmelCase ) elif isinstance(image[0] , torch.Tensor ): snake_case__ : Any = torch.cat(_lowerCAmelCase , dim=0 ) return image def __snake_case( _lowerCAmelCase ) -> List[str]: if isinstance(_lowerCAmelCase , torch.Tensor ): return mask elif isinstance(_lowerCAmelCase , PIL.Image.Image ): snake_case__ : Any = [mask] if isinstance(mask[0] , PIL.Image.Image ): snake_case__ , snake_case__ : Optional[int] = mask[0].size snake_case__ , snake_case__ : List[Any] = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 snake_case__ : int = [np.array(m.convert("""L""" ).resize((w, h) , 
resample=PIL_INTERPOLATION["""nearest"""] ) )[None, :] for m in mask] snake_case__ : Dict = np.concatenate(_lowerCAmelCase , axis=0 ) snake_case__ : int = mask.astype(np.floataa ) / 255.0 snake_case__ : Optional[Any] = 0 snake_case__ : str = 1 snake_case__ : Dict = torch.from_numpy(_lowerCAmelCase ) elif isinstance(mask[0] , torch.Tensor ): snake_case__ : Optional[Any] = torch.cat(_lowerCAmelCase , dim=0 ) return mask class UpperCAmelCase_ ( _a ): """simple docstring""" lowercase = 42 lowercase = 42 def __init__( self : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : List[Any] ): super().__init__() self.register_modules(unet=snake_case_ , scheduler=snake_case_ ) @torch.no_grad() def __call__( self : Optional[Any] , snake_case_ : Union[torch.Tensor, PIL.Image.Image] , snake_case_ : Union[torch.Tensor, PIL.Image.Image] , snake_case_ : int = 250 , snake_case_ : float = 0.0 , snake_case_ : int = 10 , snake_case_ : int = 10 , snake_case_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case_ : Optional[str] = "pil" , snake_case_ : bool = True , ): snake_case__ : Optional[Any] = image snake_case__ : Dict = _preprocess_image(snake_case_ ) snake_case__ : int = original_image.to(device=self.device , dtype=self.unet.dtype ) snake_case__ : int = _preprocess_mask(snake_case_ ) snake_case__ : List[str] = mask_image.to(device=self.device , dtype=self.unet.dtype ) snake_case__ : Union[str, Any] = original_image.shape[0] # sample gaussian noise to begin the loop if isinstance(snake_case_ , snake_case_ ) and len(snake_case_ ) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(snake_case_ )}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) snake_case__ : Any = original_image.shape snake_case__ : Any = randn_tensor(snake_case_ , generator=snake_case_ , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(snake_case_ , snake_case_ , snake_case_ , self.device ) snake_case__ : Dict = eta snake_case__ : Optional[int] = self.scheduler.timesteps[0] + 1 snake_case__ : List[str] = generator[0] if isinstance(snake_case_ , snake_case_ ) else generator for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): if t < t_last: # predict the noise residual snake_case__ : Optional[int] = self.unet(snake_case_ , snake_case_ ).sample # compute previous image: x_t -> x_t-1 snake_case__ : Optional[int] = self.scheduler.step(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ).prev_sample else: # compute the reverse: x_t-1 -> x_t snake_case__ : str = self.scheduler.undo_step(snake_case_ , snake_case_ , snake_case_ ) snake_case__ : Optional[Any] = t snake_case__ : Optional[Any] = (image / 2 + 0.5).clamp(0 , 1 ) snake_case__ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": snake_case__ : str = self.numpy_to_pil(snake_case_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=snake_case_ )
35
'''simple docstring'''
import numpy as np

from transformers import Pipeline


def softmax(outputs):
    """Numerically stable softmax over the last axis of ``outputs``.

    FIX: the mangled source passed the input array itself as ``keepdims`` (which
    raises ``ValueError`` for any multi-element input) and named this function
    ``__snake_case`` while the call site below uses ``softmax``.
    """
    # Subtract the row-wise max before exponentiating to avoid overflow; the
    # reductions must keep their dims so they broadcast back over the last axis.
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class UpperCAmelCase_(Pipeline):
    """Pair-classification pipeline: returns the best label, its score, and raw logits.

    FIX: the mangled source derived from the undefined name ``_a`` (the only
    imported base is ``Pipeline``) and gave all four hooks the same name, so only
    the last survived; the Pipeline contract requires ``_sanitize_parameters``,
    ``preprocess``, ``_forward`` and ``postprocess``.
    """

    def _sanitize_parameters(self, **kwargs):
        # Only ``second_text`` is routed to preprocess; forward/postprocess take no kwargs.
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        # Tokenize the (optionally paired) input for the configured framework.
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        """Convert single-example logits to {label, score, logits}."""
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)
        best_class = np.argmax(probabilities)
        # NOTE(review): the mangled source read ``config.idalabel``; the standard
        # transformers attribute is ``id2label`` -- confirm against the model config.
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        return {"label": label, "score": score, "logits": logits.tolist()}
35
1
'''simple docstring'''
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
    """Integration checks comparing XLM-RoBERTa hidden states against fairseq reference values.

    NOTE(review): both test methods share the mangled name ``lowerCamelCase``, so
    the second definition shadows the first; also, the names ``model`` /
    ``output`` / ``snake_case_`` used below do not match the mangled bindings.
    """

    @slow
    def lowerCamelCase ( self : str ):
        # xlm-roberta-base on a fixed sentence; compare the last hidden state.
        snake_case__ : str = XLMRobertaModel.from_pretrained("""xlm-roberta-base""" )
        # token ids for "The dog is cute and lives in the garden house"
        snake_case__ : Union[str, Any] = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]] )  # The dog is cute and lives in the garden house
        # expected (batch_size, sequence_length, embedding_vector_dim)
        snake_case__ : List[Any] = torch.Size((1, 12, 768) )  # batch_size, sequence_length, embedding_vector_dim
        # reference values for the last embedding dimension, per the recipe below
        snake_case__ : Dict = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            snake_case__ : str = model(snake_case_ )["""last_hidden_state"""].detach()
        self.assertEqual(output.shape , snake_case_ )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , snake_case_ , atol=1E-3 ) )

    @slow
    def lowerCamelCase ( self : Union[str, Any] ):
        # Same check for xlm-roberta-large (hidden size 1024).
        snake_case__ : List[str] = XLMRobertaModel.from_pretrained("""xlm-roberta-large""" )
        snake_case__ : Union[str, Any] = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]] )  # The dog is cute and lives in the garden house
        snake_case__ : Optional[int] = torch.Size((1, 12, 1_024) )  # batch_size, sequence_length, embedding_vector_dim
        snake_case__ : int = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            snake_case__ : List[Any] = model(snake_case_ )["""last_hidden_state"""].detach()
        self.assertEqual(output.shape , snake_case_ )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , snake_case_ , atol=1E-3 ) )
35
'''simple docstring''' # Function to print upper half of diamond (pyramid) def __snake_case( _lowerCAmelCase ) -> Any: for i in range(0 , _lowerCAmelCase ): for _ in range(0 , n - i - 1 ): # printing spaces print(""" """ , end="""""" ) for _ in range(0 , i + 1 ): # printing stars print("""* """ , end="""""" ) print() def __snake_case( _lowerCAmelCase ) -> List[str]: for i in range(_lowerCAmelCase , 0 , -1 ): for _ in range(_lowerCAmelCase , 0 , -1 ): # printing stars print("""* """ , end="""""" ) print() for _ in range(n - i + 1 , 0 , -1 ): # printing spaces print(""" """ , end="""""" ) def __snake_case( _lowerCAmelCase ) -> List[Any]: if n <= 0: print(""" ... .... nothing printing :(""" ) return floyd(_lowerCAmelCase ) # upper half reverse_floyd(_lowerCAmelCase ) # lower half if __name__ == "__main__": print(R"| /\ | |- | |- |--| |\ /| |-") print(R"|/ \| |- |_ |_ |__| | \/ | |_") __a = 1 while K: __a = int(input("enter the number and , and see the magic : ")) print() pretty_print(user_number) __a = int(input("press 0 to exit... and 1 to continue...")) print("Good Bye...")
35
1
'''simple docstring''' from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def __snake_case( _lowerCAmelCase ) -> Union[str, Any]: if isinstance(_lowerCAmelCase , collections.abc.Iterable ): return x return (x, x) @require_tf class UpperCAmelCase_ : """simple docstring""" def lowerCamelCase ( self : Union[str, Any] , snake_case_ : str , snake_case_ : List[Any] ): pass def lowerCamelCase ( self : Optional[int] ): pass def lowerCamelCase ( self : Optional[Any] ): pass def lowerCamelCase ( self : Dict , snake_case_ : Tuple , snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : Optional[Any]=None , **snake_case_ : List[Any] ): snake_case__ : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(snake_case_ , snake_case_ ) snake_case__ : Optional[Any] = TFVisionTextDualEncoderModel(snake_case_ ) snake_case__ : Tuple = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , 
(pixel_values.shape[0], config.projection_dim) ) def lowerCamelCase ( self : Union[str, Any] , snake_case_ : Dict , snake_case_ : Any , snake_case_ : Optional[int] , snake_case_ : Tuple , snake_case_ : List[Any]=None , **snake_case_ : Union[str, Any] ): snake_case__ , snake_case__ : List[str] = self.get_vision_text_model(snake_case_ , snake_case_ ) snake_case__ : Union[str, Any] = TFVisionTextDualEncoderModel(vision_model=snake_case_ , text_model=snake_case_ ) snake_case__ : int = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def lowerCamelCase ( self : Tuple , snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Dict , snake_case_ : str=None , **snake_case_ : Union[str, Any] ): snake_case__ , snake_case__ : Dict = self.get_vision_text_model(snake_case_ , snake_case_ ) snake_case__ : Dict = {"""vision_model""": vision_model, """text_model""": text_model} snake_case__ : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**snake_case_ ) snake_case__ : Dict = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def lowerCamelCase ( self : Any , snake_case_ : Optional[int] , snake_case_ : Dict , snake_case_ : Any , snake_case_ : Optional[int] , snake_case_ : int=None , **snake_case_ : str ): snake_case__ , snake_case__ : Union[str, Any] = self.get_vision_text_model(snake_case_ , snake_case_ ) snake_case__ : Any = TFVisionTextDualEncoderModel(vision_model=snake_case_ , text_model=snake_case_ ) snake_case__ : int = 
model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ ) snake_case__ : int = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(snake_case_ ) snake_case__ : List[str] = TFVisionTextDualEncoderModel.from_pretrained(snake_case_ ) snake_case__ : Dict = model(input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ ) snake_case__ : Tuple = after_output[0].numpy() snake_case__ : int = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(snake_case_ , 1E-5 ) def lowerCamelCase ( self : str , snake_case_ : List[Any] , snake_case_ : Union[str, Any] , snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : List[str]=None , **snake_case_ : List[str] ): snake_case__ , snake_case__ : Optional[int] = self.get_vision_text_model(snake_case_ , snake_case_ ) snake_case__ : Optional[int] = TFVisionTextDualEncoderModel(vision_model=snake_case_ , text_model=snake_case_ ) snake_case__ : int = model( input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ , output_attentions=snake_case_ ) snake_case__ : List[Any] = output.vision_model_output.attentions self.assertEqual(len(snake_case_ ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) snake_case__ : Optional[Any] = to_atuple(vision_model.config.image_size ) snake_case__ : str = to_atuple(vision_model.config.patch_size ) snake_case__ : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) snake_case__ : Union[str, Any] = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) snake_case__ : Any = output.text_model_output.attentions self.assertEqual(len(snake_case_ ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def lowerCamelCase ( self : str , 
snake_case_ : np.ndarray , snake_case_ : np.ndarray , snake_case_ : float ): snake_case__ : List[Any] = np.abs((a - b) ).max() self.assertLessEqual(snake_case_ , snake_case_ , f"Difference between torch and flax is {diff} (>= {tol})." ) def lowerCamelCase ( self : Any ): snake_case__ : int = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**snake_case_ ) def lowerCamelCase ( self : Any ): snake_case__ : Union[str, Any] = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**snake_case_ ) def lowerCamelCase ( self : str ): snake_case__ : int = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**snake_case_ ) def lowerCamelCase ( self : List[Any] ): snake_case__ : Optional[int] = self.prepare_config_and_inputs() self.check_save_load(**snake_case_ ) def lowerCamelCase ( self : int ): snake_case__ : Optional[int] = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**snake_case_ ) @slow def lowerCamelCase ( self : str ): snake_case__ , snake_case__ : Any = self.get_pretrained_model_and_inputs() snake_case__ : Union[str, Any] = model_a(**snake_case_ ) snake_case__ : Union[str, Any] = outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(snake_case_ ) snake_case__ : Optional[Any] = TFVisionTextDualEncoderModel.from_pretrained(snake_case_ ) snake_case__ : int = model_a(**snake_case_ ) snake_case__ : Dict = after_outputs[0].numpy() snake_case__ : Optional[Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(snake_case_ , 1E-5 ) @require_tf class UpperCAmelCase_ ( _a , unittest.TestCase ): """simple docstring""" def lowerCamelCase ( self : Optional[int] ): snake_case__ : int = TFVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" ) snake_case__ : Optional[int] = 13 snake_case__ : Tuple = floats_tensor( [ batch_size, 
model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) snake_case__ : Optional[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) snake_case__ : Any = random_attention_mask([batch_size, 4] ) snake_case__ : Tuple = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def lowerCamelCase ( self : Optional[int] , snake_case_ : Tuple , snake_case_ : Optional[int] ): snake_case__ : Union[str, Any] = TFViTModel(snake_case_ , name="""vision_model""" ) snake_case__ : Any = TFBertModel(snake_case_ , name="""text_model""" ) return vision_model, text_model def lowerCamelCase ( self : str ): snake_case__ : Union[str, Any] = TFViTModelTester(self ) snake_case__ : str = TFBertModelTester(self ) snake_case__ : Union[str, Any] = vit_model_tester.prepare_config_and_inputs() snake_case__ : Any = bert_model_tester.prepare_config_and_inputs() snake_case__ , snake_case__ , snake_case__ : Optional[Any] = vision_config_and_inputs ( ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ) : Any = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class UpperCAmelCase_ ( _a , unittest.TestCase ): """simple docstring""" def lowerCamelCase ( self : Dict ): # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's # just reinitialize it. 
snake_case__ : Any = TFVisionTextDualEncoderModel.from_vision_text_pretrained( """Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" ) snake_case__ : Any = 13 snake_case__ : Optional[int] = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) snake_case__ : Optional[int] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) snake_case__ : Optional[Any] = random_attention_mask([batch_size, 4] ) snake_case__ : Dict = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def lowerCamelCase ( self : List[str] , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : int , snake_case_ : Dict , snake_case_ : Optional[int]=None , **snake_case_ : Optional[Any] ): snake_case__ , snake_case__ : Any = self.get_vision_text_model(snake_case_ , snake_case_ ) snake_case__ : Tuple = TFVisionTextDualEncoderModel(vision_model=snake_case_ , text_model=snake_case_ ) snake_case__ : Union[str, Any] = model( input_ids=snake_case_ , pixel_values=snake_case_ , attention_mask=snake_case_ , output_attentions=snake_case_ ) snake_case__ : str = output.vision_model_output.attentions self.assertEqual(len(snake_case_ ) , vision_config.num_hidden_layers ) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) snake_case__ : Tuple = to_atuple(vision_model.config.image_size ) snake_case__ : List[Any] = to_atuple(vision_model.config.patch_size ) snake_case__ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) snake_case__ : Dict = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) snake_case__ : Optional[Any] = output.text_model_output.attentions self.assertEqual(len(snake_case_ ) , text_config.num_hidden_layers ) self.assertEqual( 
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def lowerCamelCase ( self : Tuple , snake_case_ : Optional[int] , snake_case_ : Optional[int] ): snake_case__ : Union[str, Any] = TFDeiTModel(snake_case_ , name="""vision_model""" ) snake_case__ : Tuple = TFRobertaModel(snake_case_ , name="""text_model""" ) return vision_model, text_model def lowerCamelCase ( self : List[str] ): snake_case__ : int = TFDeiTModelTester(self ) snake_case__ : Union[str, Any] = TFRobertaModelTester(self ) snake_case__ : Optional[Any] = vit_model_tester.prepare_config_and_inputs() snake_case__ : str = bert_model_tester.prepare_config_and_inputs() snake_case__ , snake_case__ , snake_case__ : Any = vision_config_and_inputs ( ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ) : Dict = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class UpperCAmelCase_ ( _a , unittest.TestCase ): """simple docstring""" def lowerCamelCase ( self : Union[str, Any] ): snake_case__ : Any = TFVisionTextDualEncoderModel.from_vision_text_pretrained( """Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" ) snake_case__ : Tuple = 13 snake_case__ : Tuple = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) snake_case__ : int = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) snake_case__ : str = random_attention_mask([batch_size, 4] ) snake_case__ : Tuple = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": 
attention_mask} return model, inputs def lowerCamelCase ( self : Optional[int] , snake_case_ : Tuple , snake_case_ : int ): snake_case__ : List[str] = TFCLIPVisionModel(snake_case_ , name="""vision_model""" ) snake_case__ : Optional[Any] = TFBertModel(snake_case_ , name="""text_model""" ) return vision_model, text_model def lowerCamelCase ( self : Dict ): snake_case__ : int = TFCLIPVisionModelTester(self ) snake_case__ : Optional[int] = TFBertModelTester(self ) snake_case__ : str = clip_model_tester.prepare_config_and_inputs() snake_case__ : Optional[Any] = bert_model_tester.prepare_config_and_inputs() snake_case__ , snake_case__ : List[Any] = vision_config_and_inputs ( ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ) : str = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" @slow def lowerCamelCase ( self : List[Any] ): snake_case__ : Optional[Any] = TFVisionTextDualEncoderModel.from_pretrained( """clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=snake_case_ ) snake_case__ : int = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" ) snake_case__ : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) snake_case__ : List[str] = processor( text=["""una foto di un gatto""", """una foto di un cane"""] , images=snake_case_ , padding=snake_case_ , return_tensors="""np""" ) snake_case__ : int = model(**snake_case_ ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) 
self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) snake_case__ : Optional[int] = np.array([[1.2284727, 0.3104122]] ) self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , snake_case_ , atol=1E-3 ) )
35
'''simple docstring'''


def solution(n=1_000):
    """Project Euler problem 1: sum of all multiples of 3 or 5 below ``n``.

    Uses the closed-form arithmetic-series sum with inclusion-exclusion
    (multiples of 3, plus multiples of 5, minus multiples of 15 counted
    twice), so it runs in O(1) instead of iterating over the whole range.

    :param n: exclusive upper bound (default 1000, the Euler problem input)
    :return: the sum of every multiple of 3 or 5 strictly below ``n``
    """
    if n <= 0:  # empty range -> nothing to sum
        return 0

    def _sum_of_multiples(k):
        # Sum k + 2k + ... + mk where mk is the largest multiple below n.
        m = (n - 1) // k
        return k * m * (m + 1) // 2

    return _sum_of_multiples(3) + _sum_of_multiples(5) - _sum_of_multiples(15)


if __name__ == "__main__":
    print(f"{solution() = }")
35
1
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionXLImgaImgPipeline, UNetaDConditionModel, ) from diffusers.utils import floats_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class UpperCAmelCase_ ( _a , _a , unittest.TestCase ): """simple docstring""" lowercase = StableDiffusionXLImgaImgPipeline lowercase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} lowercase = PipelineTesterMixin.required_optional_params - {"latents"} lowercase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS lowercase = IMAGE_TO_IMAGE_IMAGE_PARAMS lowercase = IMAGE_TO_IMAGE_IMAGE_PARAMS def lowerCamelCase ( self : List[Any] ): torch.manual_seed(0 ) snake_case__ : List[str] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=snake_case_ , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , ) snake_case__ : Optional[int] = EulerDiscreteScheduler( beta_start=0.00085 , beta_end=0.012 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , ) torch.manual_seed(0 ) snake_case__ : Dict = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , 
out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) snake_case__ : int = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=32 , ) snake_case__ : Optional[Any] = CLIPTextModel(snake_case_ ) snake_case__ : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=snake_case_ ) snake_case__ : Dict = CLIPTextModelWithProjection(snake_case_ ) snake_case__ : List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=snake_case_ ) snake_case__ : List[str] = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """text_encoder_2""": text_encoder_a, """tokenizer_2""": tokenizer_a, # "safety_checker": None, # "feature_extractor": None, } return components def lowerCamelCase ( self : Dict , snake_case_ : str , snake_case_ : Tuple=0 ): snake_case__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(snake_case_ ) ).to(snake_case_ ) snake_case__ : Union[str, Any] = image / 2 + 0.5 if str(snake_case_ ).startswith("""mps""" ): snake_case__ : List[str] = torch.manual_seed(snake_case_ ) else: snake_case__ : List[Any] = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ ) snake_case__ : Optional[Any] = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 5.0, """output_type""": """numpy""", """strength""": 0.75, } return inputs def lowerCamelCase ( self : Dict ): snake_case__ : List[Any] = """cpu""" # ensure determinism for the 
device-dependent torch.Generator snake_case__ : Optional[Any] = self.get_dummy_components() snake_case__ : Any = StableDiffusionXLImgaImgPipeline(**snake_case_ ) snake_case__ : Optional[int] = sd_pipe.to(snake_case_ ) sd_pipe.set_progress_bar_config(disable=snake_case_ ) snake_case__ : Optional[Any] = self.get_dummy_inputs(snake_case_ ) snake_case__ : Any = sd_pipe(**snake_case_ ).images snake_case__ : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) snake_case__ : Dict = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCamelCase ( self : Union[str, Any] ): super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 ) def lowerCamelCase ( self : Any ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def lowerCamelCase ( self : Any ): pass def lowerCamelCase ( self : List[str] ): snake_case__ : Union[str, Any] = self.get_dummy_components() snake_case__ : str = StableDiffusionXLImgaImgPipeline(**snake_case_ ) snake_case__ : Optional[int] = sd_pipe.to(snake_case_ ) snake_case__ : Optional[Any] = sd_pipe.to(snake_case_ ) sd_pipe.set_progress_bar_config(disable=snake_case_ ) # forward without prompt embeds snake_case__ : Union[str, Any] = self.get_dummy_inputs(snake_case_ ) snake_case__ : List[Any] = 3 * ["""this is a negative prompt"""] snake_case__ : Union[str, Any] = negative_prompt snake_case__ : Optional[Any] = 3 * [inputs["""prompt"""]] snake_case__ : str = sd_pipe(**snake_case_ ) snake_case__ : str = output.images[0, -3:, -3:, -1] # forward with prompt embeds snake_case__ : Union[str, Any] = self.get_dummy_inputs(snake_case_ ) snake_case__ : Any = 3 * ["""this is a negative prompt"""] snake_case__ : Any = 3 * [inputs.pop("""prompt""" )] ( ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ( snake_case__ ) , ) : Union[str, Any] = sd_pipe.encode_prompt(snake_case_ , negative_prompt=snake_case_ 
) snake_case__ : List[str] = sd_pipe( **snake_case_ , prompt_embeds=snake_case_ , negative_prompt_embeds=snake_case_ , pooled_prompt_embeds=snake_case_ , negative_pooled_prompt_embeds=snake_case_ , ) snake_case__ : Optional[int] = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 @slow @require_torch_gpu class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase ( self : Union[str, Any] ): super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase ( self : Optional[Any] , snake_case_ : Optional[int] , snake_case_ : Optional[int]="cpu" , snake_case_ : Dict=torch.floataa , snake_case_ : Optional[int]=0 ): snake_case__ : Tuple = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ ) snake_case__ : List[Any] = np.random.RandomState(snake_case_ ).standard_normal((1, 4, 64, 64) ) snake_case__ : str = torch.from_numpy(snake_case_ ).to(device=snake_case_ , dtype=snake_case_ ) snake_case__ : List[Any] = { """prompt""": """a photograph of an astronaut riding a horse""", """latents""": latents, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def lowerCamelCase ( self : List[Any] ): snake_case__ : Optional[Any] = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" ) pipe.to(snake_case_ ) pipe.set_progress_bar_config(disable=snake_case_ ) snake_case__ : Optional[Any] = self.get_inputs(snake_case_ ) snake_case__ : List[str] = pipe(**snake_case_ ).images snake_case__ : Dict = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) snake_case__ : int = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506] ) assert np.abs(image_slice - expected_slice ).max() < 7E-3
35
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Lazy import structure: maps submodule name -> the public names it provides.
# Optional-dependency branches below EXTEND this dict (the previous version
# overwrote an unrelated name, so `_LazyModule` raised NameError and the
# tokenizer/model entries were silently dropped).
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
35
1
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import OwlViTImageProcessor, OwlViTProcessor @require_vision class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase ( self : Dict ): snake_case__ : Tuple = tempfile.mkdtemp() # fmt: off snake_case__ : Optional[int] = ["""""", """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""] # fmt: on snake_case__ : Union[str, Any] = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) ) snake_case__ : List[Any] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""] snake_case__ : Union[str, Any] = {"""unk_token""": """<unk>"""} snake_case__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) snake_case__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(snake_case_ ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(snake_case_ ) ) snake_case__ : List[str] = { """do_resize""": True, """size""": 20, """do_center_crop""": True, """crop_size""": 18, """do_normalize""": True, """image_mean""": [0.48145466, 0.4578275, 0.40821073], """image_std""": [0.26862954, 0.26130258, 0.27577711], } snake_case__ : int = os.path.join(self.tmpdirname , snake_case_ ) with 
open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(snake_case_ , snake_case_ ) def lowerCamelCase ( self : int , **snake_case_ : List[Any] ): return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="""!""" , **snake_case_ ) def lowerCamelCase ( self : Dict , **snake_case_ : int ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="""!""" , **snake_case_ ) def lowerCamelCase ( self : str , **snake_case_ : Union[str, Any] ): return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **snake_case_ ) def lowerCamelCase ( self : Tuple ): shutil.rmtree(self.tmpdirname ) def lowerCamelCase ( self : Union[str, Any] ): snake_case__ : Tuple = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] snake_case__ : int = [Image.fromarray(np.moveaxis(snake_case_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowerCamelCase ( self : Optional[Any] ): snake_case__ : List[Any] = self.get_tokenizer() snake_case__ : int = self.get_rust_tokenizer() snake_case__ : Optional[int] = self.get_image_processor() snake_case__ : List[Any] = OwlViTProcessor(tokenizer=snake_case_ , image_processor=snake_case_ ) processor_slow.save_pretrained(self.tmpdirname ) snake_case__ : Tuple = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case_ ) snake_case__ : List[Any] = OwlViTProcessor(tokenizer=snake_case_ , image_processor=snake_case_ ) processor_fast.save_pretrained(self.tmpdirname ) snake_case__ : List[str] = OwlViTProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , snake_case_ ) self.assertIsInstance(processor_fast.tokenizer , snake_case_ ) self.assertEqual(processor_slow.image_processor.to_json_string() , 
image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , snake_case_ ) self.assertIsInstance(processor_fast.image_processor , snake_case_ ) def lowerCamelCase ( self : List[Any] ): snake_case__ : Dict = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) snake_case__ : Any = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) snake_case__ : Union[str, Any] = self.get_image_processor(do_normalize=snake_case_ ) snake_case__ : Optional[Any] = OwlViTProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case_ ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , snake_case_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , snake_case_ ) def lowerCamelCase ( self : Tuple ): snake_case__ : List[Any] = self.get_image_processor() snake_case__ : Optional[int] = self.get_tokenizer() snake_case__ : List[str] = OwlViTProcessor(tokenizer=snake_case_ , image_processor=snake_case_ ) snake_case__ : List[Any] = self.prepare_image_inputs() snake_case__ : str = image_processor(snake_case_ , return_tensors="""np""" ) snake_case__ : Optional[Any] = processor(images=snake_case_ , return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def lowerCamelCase ( self : Dict ): snake_case__ : Union[str, Any] = self.get_image_processor() snake_case__ : str = self.get_tokenizer() snake_case__ : Tuple = OwlViTProcessor(tokenizer=snake_case_ , image_processor=snake_case_ ) snake_case__ : Tuple = """lower newer""" snake_case__ : 
# NOTE(review): this chunk begins mid-method — the statements below are the tail of a
# tokenizer-vs-processor comparison test whose opening lines are outside this view.
# NOTE(review): throughout these methods, results are bound to the throwaway name
# `snake_case__` but then read through different names (`processor`, `inputs`,
# `input_ids`, `encoded_tok`, ...); as written those reads raise NameError at
# runtime — TODO restore the original variable names. All methods also share the
# name `lowerCamelCase`, so each definition shadows the previous one and only the
# last survives on the class; none is discovered by unittest as a `test_*` method.
Tuple = processor(text=snake_case_ , return_tensors="""np""" )
snake_case__ : Any = tokenizer(snake_case_ , return_tensors="""np""" )
# Compare the tokenizer-only encoding against the processor's text encoding, key by key.
for key in encoded_tok.keys():
    self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )

def lowerCamelCase ( self : Optional[int] ):
    # A combined text+image call should produce input_ids, attention_mask and pixel_values,
    # and calling the processor with no input at all should raise.
    snake_case__ : Any = self.get_image_processor()
    snake_case__ : List[str] = self.get_tokenizer()
    snake_case__ : str = OwlViTProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
    snake_case__ : List[str] = """lower newer"""
    snake_case__ : Optional[int] = self.prepare_image_inputs()
    snake_case__ : Optional[int] = processor(text=snake_case_ , images=snake_case_ )
    self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
    # test if it raises when no input is passed
    with pytest.raises(snake_case_ ):
        processor()

def lowerCamelCase ( self : Dict ):
    # Text-only call with a flat list of queries: two queries padded to seq_length 16.
    snake_case__ : Optional[int] = """google/owlvit-base-patch32"""
    snake_case__ : Optional[int] = OwlViTProcessor.from_pretrained(snake_case_ )
    snake_case__ : Any = ["""cat""", """nasa badge"""]
    snake_case__ : Optional[Any] = processor(text=snake_case_ )
    snake_case__ : int = 16
    self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
    self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
    # test if it raises when no input is passed
    with pytest.raises(snake_case_ ):
        processor()

def lowerCamelCase ( self : str ):
    # Nested query lists: output should be padded to
    # (batch_size * max queries per image, seq_length).
    snake_case__ : int = """google/owlvit-base-patch32"""
    snake_case__ : str = OwlViTProcessor.from_pretrained(snake_case_ )
    snake_case__ : Union[str, Any] = [["""cat""", """nasa badge"""], ["""person"""]]
    snake_case__ : str = processor(text=snake_case_ )
    snake_case__ : Union[str, Any] = 16
    snake_case__ : Optional[int] = len(snake_case_ )
    snake_case__ : Optional[int] = max([len(snake_case_ ) for texts in input_texts] )
    self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
    self.assertEqual(inputs["""input_ids"""].shape , (batch_size * num_max_text_queries, seq_length) )
    # test if it raises when no input is passed
    with pytest.raises(snake_case_ ):
        processor()

def lowerCamelCase ( self : Tuple ):
    # Checks the exact token ids produced for two known queries.
    # NOTE(review): presumably 49_406/49_407 are the CLIP BOS/EOS ids — confirm
    # against the tokenizer's vocab.
    snake_case__ : str = """google/owlvit-base-patch32"""
    snake_case__ : Optional[int] = OwlViTProcessor.from_pretrained(snake_case_ )
    snake_case__ : int = ["""cat""", """nasa badge"""]
    snake_case__ : Optional[int] = processor(text=snake_case_ )
    snake_case__ : Union[str, Any] = 16
    snake_case__ : Any = inputs["""input_ids"""]
    snake_case__ : Optional[int] = [
        [49_406, 2_368, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [49_406, 6_841, 11_301, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    ]
    self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
    self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
    self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
    self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )

def lowerCamelCase ( self : Dict ):
    # Image-guided detection path: images + query_images should yield both
    # query_pixel_values and pixel_values.
    snake_case__ : List[str] = self.get_image_processor()
    snake_case__ : Any = self.get_tokenizer()
    snake_case__ : Optional[Any] = OwlViTProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
    snake_case__ : List[str] = self.prepare_image_inputs()
    snake_case__ : int = self.prepare_image_inputs()
    snake_case__ : Tuple = processor(images=snake_case_ , query_images=snake_case_ )
    self.assertListEqual(list(inputs.keys() ) , ["""query_pixel_values""", """pixel_values"""] )
    # test if it raises when no input is passed
    with pytest.raises(snake_case_ ):
        processor()

def lowerCamelCase ( self : Dict ):
    # The processor's batch_decode should simply forward to the tokenizer's.
    snake_case__ : Any = self.get_image_processor()
    snake_case__ : str = self.get_tokenizer()
    snake_case__ : Union[str, Any] = OwlViTProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
    snake_case__ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
    snake_case__ : List[Any] = processor.batch_decode(snake_case_ )
    snake_case__ : str = tokenizer.batch_decode(snake_case_ )
    self.assertListEqual(snake_case_ , snake_case_ )
35
"""Shift the brightness of an image by a fixed offset using PIL."""
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """Return a copy of ``img`` with every channel value shifted by ``level``.

    Args:
        img: the source PIL image.
        level: brightness offset; -255.0 yields black, 255.0 yields white.

    Raises:
        ValueError: if ``level`` is outside [-255.0, 255.0].
    """

    def brightness(c: int) -> float:
        # 128 + level + (c - 128) == c + level; kept in this form to make the
        # "shift around the mid-gray point" intent explicit.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    # BUG FIX: the mapping *function* must be handed to Image.point(); previously
    # the numeric ``level`` argument was passed instead.
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        # BUG FIX: bind the result to the name that is saved below; it was
        # previously assigned to an unrelated module-level name.
        brigt_img = change_brightness(img, 100)
    brigt_img.save("image_data/lena_brightness.png", format="png")
35
1
"""Tests for ``accelerate.utils.memory``: find_executable_batch_size / release_memory."""
import unittest

import torch
from torch import nn

from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory


def raise_fake_out_of_memory():
    """Simulate the CUDA OOM error that ``find_executable_batch_size`` retries on."""
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    """Tiny 3 -> 4 -> 5 MLP used only to allocate some CUDA memory in the tests."""

    def __init__(self):
        super().__init__()
        # BUG FIX: layers must be stored on ``self`` (they were previously bound
        # to throwaway locals, leaving the module empty) and the two distinct
        # Linear layers need distinct attribute names.
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    # BUG FIX: every test was previously named ``lowerCamelCase`` (so earlier
    # definitions were shadowed and never ran) and referenced undefined names
    # (``batch_size``, ``batch_sizes``, exception classes); restored from the
    # use sites visible in the bodies.

    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        # The decorator halves the batch size on every (fake) OOM.
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arga):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arga

        bs, arga = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arga], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        # NOTE(review): the expected exception type was lost in this copy;
        # RuntimeError matches the "reached zero" failure mode — confirm against
        # accelerate's own test suite.
        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise raise_fake_out_of_memory()

        # Passing batch_size explicitly is a usage error the decorator reports.
        # NOTE(review): exception type lost in this copy; TypeError assumed — verify.
        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        # Unrelated exceptions must propagate unchanged, not trigger retries.
        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
35
"""Custom, stable "isort" for the ``_import_structure`` dicts found in the
repository's ``__init__.py`` files.

BUG FIX: this file had been corrupted — every compiled regex was assigned to
``__a`` while the code reads ``_re_indent``/``_re_strip_line``/... , every
function was named ``__snake_case`` while call sites use the real names, the
directory walk iterated over the ``check_only`` flag instead of the package
path, and ``parser`` was undefined in ``__main__``. Names restored from the
use sites.
"""
import argparse
import os
import re


# Root of the package whose __init__.py files are checked.
PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Return the leading whitespace of ``line`` (empty string for blank lines)."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split ``code`` into blocks of consecutive lines delimited at ``indent_level``.

    If ``start_prompt`` is given, everything before the first line starting with it
    becomes the first block; if ``end_prompt`` is given, everything from the first
    line starting with it becomes the last block.
    """
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the code).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        # A line at the reference indent level closes the current block...
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            # ...unless the previous line was deeper-indented, in which case this
            # line (typically a closing bracket) still belongs to the block.
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))
    return blocks


def ignore_underscore(key):
    """Wrap a key function so the sort ignores case and underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort ``objects``: constants first, then classes, then functions (each group alphabetically)."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """Return ``import_statement`` with the names inside its brackets sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Multi-line bracket, one name per line; ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Names on one separate middle line.
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line.
        return _re_bracket_content.sub(_replace, import_statement)


def sort_imports(file, check_only=True):
    """Sort the ``_import_structure`` entries of one ``__init__.py``.

    Returns True when the file would change and ``check_only`` is set; rewrites
    the file in place otherwise.
    """
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                sorted_internal_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(sorted_internal_block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    """Run :func:`sort_imports` on every ``__init__.py`` under ``PATH_TO_TRANSFORMERS``."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures += [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
35
1
"""Shift the brightness of an image by a fixed offset using PIL."""
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """Return a copy of ``img`` with every channel value shifted by ``level``.

    Args:
        img: the source PIL image.
        level: brightness offset; -255.0 yields black, 255.0 yields white.

    Raises:
        ValueError: if ``level`` is outside [-255.0, 255.0].
    """

    def brightness(c: int) -> float:
        # 128 + level + (c - 128) == c + level; kept in this form to make the
        # "shift around the mid-gray point" intent explicit.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    # BUG FIX: the mapping *function* must be handed to Image.point(); previously
    # the numeric ``level`` argument was passed instead.
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        # BUG FIX: bind the result to the name that is saved below; it was
        # previously assigned to an unrelated module-level name.
        brigt_img = change_brightness(img, 100)
    brigt_img.save("image_data/lena_brightness.png", format="png")
35
"""Lazy-import ``__init__`` for the TimeSformer model (standard transformers pattern)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# BUG FIX: this dict was previously assigned to ``__a`` (and later overwritten by a
# bare list), so the name ``_import_structure`` read by ``_LazyModule`` below was
# undefined; restored the conventional structure.
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is optional: without it only the configuration is importable.
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    # BUG FIX: the lazy module must replace this module in sys.modules; the result
    # was previously assigned to an unused name.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
35
1
"""Convert a timm ViT-hybrid (BiT backbone + ViT) checkpoint to the HF format.

BUG FIX: throughout this file, locals were assigned to throwaway names
(``snake_case__``) while the code reads the real names (``rename_keys``,
``config``, ``state_dict``, ...), and literal arguments were replaced by
``_lowerCAmelCase``; names and literals restored from the use sites.
"""
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

from transformers import (
    BitConfig,
    ViTHybridConfig,
    ViTHybridForImageClassification,
    ViTHybridImageProcessor,
    ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    """Build the (timm key, HF key) rename table for a ViT-hybrid checkpoint."""
    rename_keys = []
    # stem:
    rename_keys.append(("cls_token", "vit.embeddings.cls_token"))
    rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings"))
    rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"))
    # backbone stem
    rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight"))
    rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight"))
    rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias"))

    # backbone stages: the timm and HF suffixes are identical, so iterate them
    # (order preserved from the original append sequence).
    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            for sfx in (
                "conv1.weight",
                "norm1.weight",
                "norm1.bias",
                "conv2.weight",
                "norm2.weight",
                "norm2.bias",
                "conv3.weight",
                "norm3.weight",
                "norm3.bias",
            ):
                rename_keys.append(
                    (
                        f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.{sfx}",
                        f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.{sfx}",
                    )
                )
        # Only the first block of each stage has a downsample.
        for sfx in ("downsample.conv.weight", "downsample.norm.weight", "downsample.norm.bias"):
            rename_keys.append(
                (
                    f"patch_embed.backbone.stages.{stage_idx}.blocks.0.{sfx}",
                    f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.{sfx}",
                )
            )

    # transformer encoder: output projection, 2 feedforward layers and 2 layernorms
    for i in range(config.num_hidden_layers):
        for src, dest in (
            ("norm1.weight", "layernorm_before.weight"),
            ("norm1.bias", "layernorm_before.bias"),
            ("attn.proj.weight", "attention.output.dense.weight"),
            ("attn.proj.bias", "attention.output.dense.bias"),
            ("norm2.weight", "layernorm_after.weight"),
            ("norm2.bias", "layernorm_after.bias"),
            ("mlp.fc1.weight", "intermediate.dense.weight"),
            ("mlp.fc1.bias", "intermediate.dense.bias"),
            ("mlp.fc2.weight", "output.dense.weight"),
            ("mlp.fc2.bias", "output.dense.bias"),
        ):
            rename_keys.append((f"blocks.{i}.{src}", f"vit.encoder.layer.{i}.{dest}"))

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )
    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    """Split timm's fused qkv projections into separate HF query/key/value tensors.

    NOTE(review): the destination key names were lost in this copy; the standard
    ViT conversion layout (attention.attention.{query,key,value}) is assumed —
    confirm against the ViTHybrid state dict.
    """
    for i in range(config.num_hidden_layers):
        prefix = "" if base_model else "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    """Drop timm's classification-head weights (bare-backbone conversion only)."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]``."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Download the standard COCO test image used to sanity-check conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy a timm ViT-hybrid checkpoint into a ViTHybrid* model, verify outputs, and save."""
    # define default ViT hybrid configuration (BiT backbone, stage3 features)
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1_000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor that mirrors timm's eval transform
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_r50_s16_384",
        type=str,
        help="Name of the hybrid ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
35
'''simple docstring''' import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() __a = logging.get_logger(__name__) __a = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } __a = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]: for attribute in key.split(""".""" ): snake_case__ : Dict = getattr(_lowerCAmelCase , _lowerCAmelCase ) if weight_type is not None: snake_case__ : List[Any] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape else: snake_case__ : Union[str, Any] = hf_pointer.shape assert hf_shape == value.shape, ( f"Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": snake_case__ : int = value elif weight_type == "weight_g": snake_case__ : List[str] = value elif weight_type == "weight_v": snake_case__ : List[str] = value elif weight_type == "bias": snake_case__ : Optional[Any] = value else: snake_case__ : str = value logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Any: snake_case__ : Union[str, Any] = [] snake_case__ : Dict = fairseq_model.state_dict() snake_case__ : List[Any] = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight snake_case__ : Optional[int] = None for name, value in fairseq_dict.items(): snake_case__ : List[Any] = False if "conv_layers" in name: load_conv_layer( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == """group""" , ) snake_case__ : Union[str, Any] = True elif name.split(""".""" )[0] == "proj": snake_case__ : Tuple = fairseq_model.proj snake_case__ : int = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: snake_case__ : Optional[Any] = True if "*" in mapped_key: snake_case__ : Optional[int] = name.split(_lowerCAmelCase )[0].split(""".""" )[-2] snake_case__ : Tuple = mapped_key.replace("""*""" , _lowerCAmelCase ) if "weight_g" in name: snake_case__ : str = """weight_g""" elif "weight_v" in name: snake_case__ : int = """weight_v""" elif "bias" in name: snake_case__ : Dict = """bias""" elif "weight" in name: snake_case__ : Union[str, Any] = """weight""" else: snake_case__ : Union[str, Any] = None set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) continue if not is_used: unused_weights.append(_lowerCAmelCase ) 
logger.warning(f"Unused weights: {unused_weights}" ) return proj_weight def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict: snake_case__ : int = full_name.split("""conv_layers.""" )[-1] snake_case__ : Dict = name.split(""".""" ) snake_case__ : Any = int(items[0] ) snake_case__ : Optional[Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) snake_case__ : int = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) snake_case__ : str = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) snake_case__ : Union[str, Any] = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) snake_case__ : int = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." 
) else: unused_weights.append(_lowerCAmelCase ) def __snake_case( _lowerCAmelCase ) -> List[str]: snake_case__ , snake_case__ : str = emb.weight.shape snake_case__ : List[str] = nn.Linear(_lowerCAmelCase , _lowerCAmelCase , bias=_lowerCAmelCase ) snake_case__ : List[str] = emb.weight.data return lin_layer def __snake_case( _lowerCAmelCase ) -> Optional[Any]: with open(_lowerCAmelCase , """r""" , encoding="""utf-8""" ) as f: snake_case__ : int = f.readlines() snake_case__ : List[Any] = [line.split(""" """ )[0] for line in lines] snake_case__ : Union[str, Any] = len(_lowerCAmelCase ) snake_case__ : Any = { """<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3, } vocab_dict.update(dict(zip(_lowerCAmelCase , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> int: snake_case__ : Optional[Any] = WavaVecaConfig.from_pretrained(_lowerCAmelCase ) snake_case__ : Optional[Any] = SpeechaTextaConfig.from_pretrained( _lowerCAmelCase , vocab_size=_lowerCAmelCase , decoder_layers=_lowerCAmelCase , do_stable_layer_norm=_lowerCAmelCase ) snake_case__ : Optional[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , ) snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) snake_case__ : Tuple = model[0].eval() # set weights for wav2vec2 encoder snake_case__ : Optional[Any] = WavaVecaModel(_lowerCAmelCase ) snake_case__ : Dict = recursively_load_weights_wavaveca(model.encoder , _lowerCAmelCase ) snake_case__ : Optional[Any] = SpeechaTextaForCausalLM(_lowerCAmelCase ) snake_case__ , snake_case__ : Tuple = 
hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_lowerCAmelCase ) # set output linear layer unexpected_keys.remove("""embed_out""" ) snake_case__ : Tuple = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}" ) logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" ) snake_case__ : List[Any] = SpeechEncoderDecoderModel(encoder=_lowerCAmelCase , decoder=_lowerCAmelCase ) snake_case__ : Tuple = False # add projection layer snake_case__ : Union[str, Any] = nn.Parameter(projection_layer.weight ) snake_case__ : int = nn.Parameter(projection_layer.bias ) snake_case__ : Tuple = create_vocab_dict(_lowerCAmelCase ) with open(os.path.join(_lowerCAmelCase , """vocab.json""" ) , """w""" ) as fp: json.dump(_lowerCAmelCase , _lowerCAmelCase ) snake_case__ : Tuple = SpeechaTextaTokenizer(os.path.join(_lowerCAmelCase , """vocab.json""" ) ) tokenizer.save_pretrained(_lowerCAmelCase ) snake_case__ : Optional[Any] = hf_wavavec.config.to_dict() snake_case__ : Tuple = tokenizer.pad_token_id snake_case__ : Optional[Any] = tokenizer.bos_token_id snake_case__ : int = tokenizer.eos_token_id snake_case__ : str = """speech_to_text_2""" snake_case__ : List[Any] = """wav2vec2""" snake_case__ : List[str] = SpeechEncoderDecoderConfig.from_dict(_lowerCAmelCase ) hf_wavavec.save_pretrained(_lowerCAmelCase ) feature_extractor.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument( 
"--encoder_config_path", default="facebook/wav2vec2-large-lv60", type=str, help="Path to hf encoder wav2vec2 checkpoint config", ) parser.add_argument( "--decoder_config_path", default="facebook/s2t-small-mustc-en-fr-st", type=str, help="Path to hf decoder s2t checkpoint config", ) parser.add_argument("--vocab_size", default=1_0224, type=int, help="Vocab size of decoder") parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers") __a = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
35
1
'''simple docstring''' from __future__ import annotations from collections.abc import Generator def __snake_case( ) -> Generator[int, None, None]: snake_case__ : dict[int, int] = {} snake_case__ : Any = 2 while True: snake_case__ : int = factor_map.pop(_lowerCAmelCase , _lowerCAmelCase ) if factor: snake_case__ : Any = factor + prime while x in factor_map: x += factor snake_case__ : Any = factor else: snake_case__ : Dict = prime yield prime prime += 1 def __snake_case( _lowerCAmelCase = 1e10 ) -> int: snake_case__ : Tuple = sieve() snake_case__ : List[Any] = 1 while True: snake_case__ : Tuple = next(_lowerCAmelCase ) if (2 * prime * n) > limit: return n # Ignore the next prime as the reminder will be 2. next(_lowerCAmelCase ) n += 2 if __name__ == "__main__": print(solution())
35
'''simple docstring'''
import importlib
import os
import sys


# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")


def get_module_path(test_file):
    """Return the dotted module path for a model test file under ``tests/models/``."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )

    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    """Import and return the test module object corresponding to `test_file`."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Return all ``*ModelTester`` classes defined in the test module."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Return all test classes (those declaring a non-empty ``all_model_classes``)."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Return all model classes covered by the test classes in `test_file`."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Instantiate `test_class` and return the class of its ``model_tester``, if any."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()

    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__

    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Return the test classes in `test_file` that cover `model_class`."""
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Return the model tester classes used by the test classes covering `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Map each test class in `test_file` to its model tester class."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Map each model class in `test_file` to the test classes covering it."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Map each model class in `test_file` to the tester classes exercising it."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Recursively replace classes by their names so the structure is JSON-serializable."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
35
1
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

__a = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}


class UpperCAmelCase_(PretrainedConfig):
    """Configuration class for the RoBERTa-PreLayerNorm model.

    Stores the hyper-parameters used to instantiate the model; all arguments
    are persisted as attributes so downstream code can read them back.
    """

    # NOTE(review): presumably this should be the `model_type` class attribute
    # expected by `PretrainedConfig` machinery — confirm against the base class.
    lowercase = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


# NOTE(review): this second class reuses the name `UpperCAmelCase_` and therefore
# shadows the config class above at module scope — the names likely need to be
# disambiguated once the original identifiers are recovered.
class UpperCAmelCase_(OnnxConfig):
    """ONNX export configuration: declares the dynamic axes of the model inputs."""

    @property
    def lowerCamelCase(self):
        # Multiple-choice inputs carry an extra `choice` dimension.
        # NOTE(review): as a property of an OnnxConfig this is presumably the
        # `inputs` property — confirm against the base class.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
35
'''simple docstring'''
import argparse
import json

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification


def get_swin_config(swin_name):
    """Build a `SwinConfig` matching a timm Swin checkpoint name.

    The name encodes the variant, e.g. `swin_tiny_patch4_window7_224`:
    size, window size and image size are parsed out of it.
    """
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21_841
    else:
        num_classes = 1_000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config


def rename_key(name):
    """Translate a timm Swin state-dict key to the Hugging Face naming scheme."""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name


def convert_state_dict(orig_state_dict, model):
    """Rewrite timm Swin weights in HF naming, splitting fused qkv matrices.

    `model` is only consulted for the per-block attention head size that
    determines where to split the fused qkv tensor.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            # Relative-position masks are buffers recomputed by the HF model.
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[
                    :dim
                ]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[
                    dim : dim * 2
                ]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[
                    -dim:
                ]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    """Load a timm Swin model, convert it, verify the logits match, and save it."""
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    # Sanity check: the converted model must reproduce the timm logits.
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swin_name",
        default="swin_tiny_patch4_window7_224",
        type=str,
        help="Name of the Swin timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
35
1
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse

from ...utils.dataclasses import (
    ComputeEnvironment,
    DistributedType,
    DynamoBackend,
    PrecisionType,
    SageMakerDistributedType,
)
from ..menu import BulletMenu


# Consumed by `_convert_dynamo_backend`; the garbled source bound this list to
# the throwaway name `__a`, leaving `DYNAMO_BACKENDS` undefined at call time.
DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]


def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    """Prompt until the answer converts cleanly; empty input returns `default` (if set)."""
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            # Conversion failed: explain (if we have a message) and re-prompt.
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=None, convert_value=None, default=0):
    """Show a bullet menu of `options` and return the (optionally converted) choice."""
    # `options=None` sentinel avoids the original mutable-default list.
    menu = BulletMenu(input_text, options if options is not None else [])
    result = menu.run(default_choice=default)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    """Map a numeric menu choice to a `ComputeEnvironment`."""
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    """Map a numeric menu choice to a `DistributedType`."""
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    """Map a numeric menu choice to a `DynamoBackend` value."""
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    """Map a numeric menu choice to a `PrecisionType`."""
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    """Map a numeric menu choice to a `SageMakerDistributedType`."""
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    """Convert a 'yes'/'no' answer (case-insensitive) to a bool."""
    return {"yes": True, "no": False}[value.lower()]


class UpperCAmelCase_(argparse.RawDescriptionHelpFormatter):
    """Help formatter that strips the `<command> [<args>]` placeholder from usage lines."""

    def _format_usage(self, usage, actions, groups, prefix):
        # Must be named `_format_usage` to actually override argparse's hook;
        # the garbled name `lowerCamelCase` made this method dead code.
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
35
'''simple docstring''' import warnings from ...utils import logging from .image_processing_beit import BeitImageProcessor __a = logging.get_logger(__name__) class UpperCAmelCase_ ( _a ): """simple docstring""" def __init__( self : List[str] , *snake_case_ : str , **snake_case_ : List[str] ): warnings.warn( """The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use BeitImageProcessor instead.""" , snake_case_ , ) super().__init__(*snake_case_ , **snake_case_ )
35
1
"""Archive-extraction helpers.

One `BaseExtractor` subclass per archive/compression format, plus:
- `ExtractManager`: maps an input path to a cached extraction directory.
- `Extractor`: format sniffing (magic numbers) and dispatch.

Fixes over the mangled original: logger defined under the name the code
uses; duplicate parameter names removed; module names restored
(``bz2``, ``py7zr``, ``lz4.frame``); class/method names restored to match
their own call sites (``TarExtractor.safemembers``, the ``extractors``
dict, ``cls.read_magic_number`` …).
"""
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union

from .. import config
from .filelock import FileLock
from .logging import get_logger

logger = get_logger(__name__)


class ExtractManager:
    """Extracts archives into a content-addressed cache directory."""

    def __init__(self, cache_dir: Optional[str] = None):
        # Extractions land under <cache_dir>/extracted (or the configured default).
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # The output directory name is the hash of the absolute input path.
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        # Re-extract when forced, or when the target is neither an existing
        # file nor a non-empty directory.
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        """Extract `input_path` if it is a recognized archive; return the output path.

        Non-archives are returned unchanged.
        """
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path


class BaseExtractor(ABC):
    """Interface every format-specific extractor implements."""

    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    """Base class for extractors recognized by a leading magic number."""

    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int) -> bytes:
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)


class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        """Yield only members whose extraction stays inside `output_path`.

        Guards against path traversal via absolute/`..` names, symlinks and
        hard links (see "tarbomb" attacks); blocked members are logged.
        """

        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)
        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()


class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)


class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()


class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID # RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()


class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)


class BzipaExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        # The mangled source imported a non-existent "bza" module; the real
        # module for this magic number (BZh) is the stdlib bz2.
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr  # mangled original imported non-existent "pyazr"

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)


class LzaExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame  # mangled original imported non-existent "lza.frame"

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class Extractor:
    """Format sniffing and dispatch over the registered extractors."""

    #  Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": BzipaExtractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": LzaExtractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls) -> int:
        # Longest magic number among all magic-number-based extractors; one
        # read of this many bytes is enough to test every format.
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int) -> bytes:
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False):
        """Deprecated: use :meth:`infer_extractor_format`."""
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> Optional[str]:  # <Added version="2.4.0"/>
        """Return the registry key of the first extractor that recognizes `path`, else None."""
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        """Extract `input_path` to `output_path` using `extractor_format` (registry key).

        Passing an extractor instance (`extractor`) or omitting
        `extractor_format` are both deprecated paths kept for compatibility.
        """
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
35
"""Training arguments for sequence-to-sequence models.

Extends `TrainingArguments` with generation-time options used when
`predict_with_generate` is enabled.
"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union

from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings

logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    """Seq2seq-specific training arguments (defaults restored from the `help` metadata)."""

    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        """Serialize to a plain dict, expanding any nested `GenerationConfig` value.

        The mangled original called `isinstance` with two identical undefined
        names; the check must be against `GenerationConfig` so the nested
        config is JSON-serializable.
        """
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
35
1
"""Repo-consistency check: every `__init__.py` using lazy `_import_structure`
must list exactly the same objects in the TYPE_CHECKING half, and every
submodule must be registered in the main init.

Fixes over the mangled original: every regex constant was assigned to the
same name ``__a`` while the code references ``_re_backend`` etc., and every
function was defined as ``__snake_case`` while called by its real name.
"""
import collections
import os
import re
from pathlib import Path

PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Return the "_and_"-joined backend names guarding `line`, or None."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """Parse a lazy `__init__.py` and return per-backend object dicts.

    Returns `(import_dict_objects, type_hint_objects)` — objects declared in
    `_import_structure` and under `TYPE_CHECKING` respectively — or None for
    a traditional (non-lazy) init.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of a lazy init; return a list of error strings."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """Run `parse_init`/`analyze_results` over every package init; raise on mismatch."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """Return the top-level submodule names found on disk under the package."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]


def check_submodules():
    """Verify every on-disk submodule is registered in the main `_import_structure`."""
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentiall re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registed in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
35
"""Convert a timm DeiT checkpoint to the Hugging Face format.

Fixes over the mangled original: the CLI section called
`parser.add_argument` on an undefined name (`parser` was assigned to
`__a`), all functions were defined under `__snake_case` while called by
their real names, and parameters reused one duplicate name.
"""
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    """Build (timm_key, hf_key) pairs for renaming the state dict."""
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each timm fused qkv projection into separate q/k/v tensors in place."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(dct, old, new):
    """Move `dct[old]` to `dct[new]` in place."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Download the standard COCO cats test image used for output verification."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy a timm DeiT model's weights into our DeiT structure, verify, and save."""
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1_000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--deit_name",
        default="vit_deit_base_distilled_patch16_224",
        type=str,
        help="Name of the DeiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
35
1
"""Utilities to introspect model test files under `tests/models/` and map
test classes, model classes and model-tester classes to one another."""
import importlib
import os
import sys


# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")


def get_module_path(test_file):
    """Return the dotted module path for a model test file.

    Raises ValueError unless `test_file` looks like
    ``tests/models/<model>/test_modeling_*.py``.
    """
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    return ".".join(components)


def get_test_module(test_file):
    """Import and return the module object for a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Return all `*ModelTester` classes defined in a model test file."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Return all test classes of a model test file.

    A test class is any attribute with a non-empty `all_model_classes`, which
    also excludes the bare (TF/Flax)ModelTesterMixin and other special classes.
    """
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Return the union of `all_model_classes` over all test classes in the file."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Return the model-tester class used by `test_class`, or None.

    Instantiates the test class and runs `setUp` so `model_tester` is populated.
    """
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Return the test classes in `test_file` that exercise `model_class`."""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Return the model-tester classes in `test_file` used for `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Map each test class in `test_file` to its model-tester class (or None)."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Map each model class in `test_file` to the test classes exercising it."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Map each model class in `test_file` to its model-tester classes."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Recursively convert classes to their names so the result is JSON-serializable.

    Strings pass through, classes become their `__name__`, containers are
    converted element-wise, and anything else is returned unchanged.
    """
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
35
"""Plain tf-idf building blocks: term frequency, document frequency,
inverse document frequency, and their product."""
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Return how many times `term` occurs in `document` (case-insensitive).

    Punctuation is stripped and newlines removed before tokenizing on spaces.
    """
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (documents containing `term`, total documents).

    Documents are the newline-separated entries of `corpus`; matching is
    case-insensitive and ignores punctuation.
    """
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return idf = log10(n / df), rounded to 3 decimals.

    With `smoothing`, returns 1 + log10(n / (1 + df)), which tolerates df == 0.

    Raises:
        ValueError: if n == 0 (log10(0) is undefined).
        ZeroDivisionError: if df == 0 and smoothing is off.
    """
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: float, idf: float) -> float:
    """Return the tf-idf score (tf * idf), rounded to 3 decimals."""
    return round(tf * idf, 3)
35
1
'''simple docstring''' import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) __a = logging.getLogger(__name__) @dataclass(frozen=_a ) class UpperCAmelCase_ : """simple docstring""" lowercase = 42 lowercase = 42 lowercase = None lowercase = None lowercase = None @dataclass(frozen=_a ) class UpperCAmelCase_ : """simple docstring""" lowercase = 42 lowercase = None lowercase = None lowercase = None lowercase = None if is_torch_available(): import torch from torch.utils.data import Dataset class UpperCAmelCase_ ( _a ): """simple docstring""" lowercase = 42 def __init__( self : Optional[int] , snake_case_ : str , snake_case_ : PreTrainedTokenizer , snake_case_ : str , snake_case_ : Optional[int] = None , snake_case_ : Dict=False , snake_case_ : bool = False , ): snake_case__ : Tuple = hans_processors[task]() snake_case__ : List[Any] = os.path.join( snake_case_ , """cached_{}_{}_{}_{}""".format( """dev""" if evaluate else """train""" , tokenizer.__class__.__name__ , str(snake_case_ ) , snake_case_ , ) , ) snake_case__ : int = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) snake_case__ , snake_case__ : Tuple = label_list[2], label_list[1] snake_case__ : str = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
snake_case__ : Optional[Any] = cached_features_file + """.lock""" with FileLock(snake_case_ ): if os.path.exists(snake_case_ ) and not overwrite_cache: logger.info(f"Loading features from cached file {cached_features_file}" ) snake_case__ : Union[str, Any] = torch.load(snake_case_ ) else: logger.info(f"Creating features from dataset file at {data_dir}" ) snake_case__ : Union[str, Any] = ( processor.get_dev_examples(snake_case_ ) if evaluate else processor.get_train_examples(snake_case_ ) ) logger.info("""Training examples: %s""" , len(snake_case_ ) ) snake_case__ : Optional[Any] = hans_convert_examples_to_features(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) logger.info("""Saving features into cached file %s""" , snake_case_ ) torch.save(self.features , snake_case_ ) def __len__( self : Any ): return len(self.features ) def __getitem__( self : int , snake_case_ : Optional[int] ): return self.features[i] def lowerCamelCase ( self : Optional[int] ): return self.label_list if is_tf_available(): import tensorflow as tf class UpperCAmelCase_ : """simple docstring""" lowercase = 42 def __init__( self : Optional[int] , snake_case_ : str , snake_case_ : PreTrainedTokenizer , snake_case_ : str , snake_case_ : Optional[int] = 128 , snake_case_ : Union[str, Any]=False , snake_case_ : bool = False , ): snake_case__ : int = hans_processors[task]() snake_case__ : List[str] = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) snake_case__ , snake_case__ : Any = label_list[2], label_list[1] snake_case__ : Optional[int] = label_list snake_case__ : Tuple = processor.get_dev_examples(snake_case_ ) if evaluate else processor.get_train_examples(snake_case_ ) snake_case__ : int = hans_convert_examples_to_features(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) def gen(): for ex_index, ex in 
tqdm.tqdm(enumerate(self.features ) , desc="""convert examples to features""" ): if ex_index % 10_000 == 0: logger.info("""Writing example %d of %d""" % (ex_index, len(snake_case_ )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) snake_case__ : int = tf.data.Dataset.from_generator( snake_case_ , ( { """example_id""": tf.intaa, """input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa, }, tf.intaa, ) , ( { """example_id""": tf.TensorShape([] ), """input_ids""": tf.TensorShape([None, None] ), """attention_mask""": tf.TensorShape([None, None] ), """token_type_ids""": tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def lowerCamelCase ( self : Optional[Any] ): return self.dataset def __len__( self : List[str] ): return len(self.features ) def __getitem__( self : List[Any] , snake_case_ : Dict ): return self.features[i] def lowerCamelCase ( self : Dict ): return self.label_list class UpperCAmelCase_ ( _a ): """simple docstring""" def lowerCamelCase ( self : Dict , snake_case_ : Tuple ): return self._create_examples(self._read_tsv(os.path.join(snake_case_ , """heuristics_train_set.txt""" ) ) , """train""" ) def lowerCamelCase ( self : Optional[int] , snake_case_ : Union[str, Any] ): return self._create_examples(self._read_tsv(os.path.join(snake_case_ , """heuristics_evaluation_set.txt""" ) ) , """dev""" ) def lowerCamelCase ( self : List[Any] ): return ["contradiction", "entailment", "neutral"] def lowerCamelCase ( self : List[Any] , snake_case_ : int , snake_case_ : Optional[Any] ): snake_case__ : Optional[int] = [] for i, line in enumerate(snake_case_ ): if i == 0: continue snake_case__ : Tuple = """%s-%s""" % (set_type, line[0]) snake_case__ : Tuple = line[5] snake_case__ : Optional[Any] = line[6] snake_case__ : Tuple = line[7][2:] if line[7].startswith("""ex""" ) else line[7] snake_case__ : Optional[int] = line[0] 
examples.append(InputExample(guid=snake_case_ , text_a=snake_case_ , text_b=snake_case_ , label=snake_case_ , pairID=snake_case_ ) ) return examples def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> str: snake_case__ : str = {label: i for i, label in enumerate(_lowerCAmelCase )} snake_case__ : List[str] = [] for ex_index, example in tqdm.tqdm(enumerate(_lowerCAmelCase ) , desc="""convert examples to features""" ): if ex_index % 10_000 == 0: logger.info("""Writing example %d""" % (ex_index) ) snake_case__ : Union[str, Any] = tokenizer( example.text_a , example.text_b , add_special_tokens=_lowerCAmelCase , max_length=_lowerCAmelCase , padding="""max_length""" , truncation=_lowerCAmelCase , return_overflowing_tokens=_lowerCAmelCase , ) snake_case__ : Tuple = label_map[example.label] if example.label in label_map else 0 snake_case__ : Tuple = int(example.pairID ) features.append(InputFeatures(**_lowerCAmelCase , label=_lowerCAmelCase , pairID=_lowerCAmelCase ) ) for i, example in enumerate(examples[:5] ): logger.info("""*** Example ***""" ) logger.info(f"guid: {example}" ) logger.info(f"features: {features[i]}" ) return features __a = { "hans": 3, } __a = { "hans": HansProcessor, }
35
'''simple docstring''' from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCAmelCase_ : """simple docstring""" def __init__( self : int , snake_case_ : Tuple , snake_case_ : List[str]=3 , snake_case_ : Tuple=32 , snake_case_ : List[Any]=3 , snake_case_ : List[str]=10 , snake_case_ : List[str]=[10, 20, 30, 40] , snake_case_ : Tuple=[1, 1, 2, 1] , snake_case_ : Tuple=True , snake_case_ : str=True , snake_case_ : int="relu" , snake_case_ : List[Any]=3 , snake_case_ : str=None , ): snake_case__ : List[Any] = parent snake_case__ : List[Any] = batch_size snake_case__ : int = image_size snake_case__ : List[Any] = num_channels snake_case__ : Optional[Any] = embeddings_size snake_case__ : Optional[int] = hidden_sizes snake_case__ : Tuple = depths snake_case__ : Any = is_training snake_case__ : Optional[int] = use_labels snake_case__ : Optional[int] = hidden_act snake_case__ : Optional[int] = num_labels snake_case__ : int = scope snake_case__ : Tuple = len(snake_case_ ) def lowerCamelCase ( self : Any ): snake_case__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case__ : Union[str, Any] = None if self.use_labels: snake_case__ : Optional[Any] = ids_tensor([self.batch_size] , 
self.num_labels ) snake_case__ : List[str] = self.get_config() return config, pixel_values, labels def lowerCamelCase ( self : int ): return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def lowerCamelCase ( self : Tuple , snake_case_ : Tuple , snake_case_ : List[Any] , snake_case_ : Optional[int] ): snake_case__ : Optional[Any] = TFResNetModel(config=snake_case_ ) snake_case__ : int = model(snake_case_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCamelCase ( self : List[str] , snake_case_ : List[str] , snake_case_ : str , snake_case_ : Union[str, Any] ): snake_case__ : str = self.num_labels snake_case__ : Optional[int] = TFResNetForImageClassification(snake_case_ ) snake_case__ : Tuple = model(snake_case_ , labels=snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase ( self : Tuple ): snake_case__ : List[Any] = self.prepare_config_and_inputs() snake_case__ , snake_case__ , snake_case__ : str = config_and_inputs snake_case__ : int = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class UpperCAmelCase_ ( _a , _a , unittest.TestCase ): """simple docstring""" lowercase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () lowercase = ( {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification} if is_tf_available() else {} ) lowercase = False lowercase = False lowercase = False lowercase = False lowercase = False def lowerCamelCase ( self : Optional[int] ): snake_case__ : Tuple = TFResNetModelTester(self ) snake_case__ : List[str] = ConfigTester(self , config_class=snake_case_ , 
has_text_modality=snake_case_ ) def lowerCamelCase ( self : Dict ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase ( self : str ): return @unittest.skip(reason="""ResNet does not use inputs_embeds""" ) def lowerCamelCase ( self : int ): pass @unittest.skip(reason="""ResNet does not support input and output embeddings""" ) def lowerCamelCase ( self : List[Any] ): pass def lowerCamelCase ( self : List[Any] ): snake_case__ , snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : Dict = model_class(snake_case_ ) snake_case__ : Optional[Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case__ : Union[str, Any] = [*signature.parameters.keys()] snake_case__ : Optional[int] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , snake_case_ ) def lowerCamelCase ( self : Union[str, Any] ): snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case_ ) def lowerCamelCase ( self : List[str] ): def check_hidden_states_output(snake_case_ : Any , snake_case_ : Any , snake_case_ : List[str] ): snake_case__ : List[Any] = model_class(snake_case_ ) snake_case__ : Dict = model(**self._prepare_for_class(snake_case_ , snake_case_ ) ) snake_case__ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states snake_case__ : List[Any] = self.model_tester.num_stages self.assertEqual(len(snake_case_ ) , expected_num_stages + 1 ) # ResNet's feature maps are of 
shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) snake_case__ , snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ : List[Any] = ["""basic""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: snake_case__ : Dict = layer_type snake_case__ : Optional[int] = True check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case__ : List[Any] = True check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ) def lowerCamelCase ( self : Optional[Any] ): snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case_ ) @slow def lowerCamelCase ( self : Optional[Any] ): for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case__ : str = TFResNetModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) def __snake_case( ) -> Optional[int]: snake_case__ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" @cached_property def lowerCamelCase ( self : List[Any] ): return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCamelCase ( self : Optional[int] ): snake_case__ : List[str] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) snake_case__ : List[Any] = self.default_image_processor snake_case__ : List[Any] = prepare_img() snake_case__ : List[str] = image_processor(images=snake_case_ , return_tensors="""tf""" ) # forward pass snake_case__ : Optional[Any] = 
model(**snake_case_ ) # verify the logits snake_case__ : Union[str, Any] = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape , snake_case_ ) snake_case__ : List[str] = tf.constant([-11.1069, -9.7877, -8.3777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , snake_case_ , atol=1E-4 ) )
35
1
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution (its givens already conflict)
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if digit `n` can go at (row, column) without violating
    the row, column, or 3x3 box constraint."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            # (row - row % 3, column - column % 3) is the box's top-left corner
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the (row, column) of the first empty cell (0), or None if full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve `grid` in place by backtracking.

    Returns the solved grid, or None when no assignment of the remaining
    empty cells satisfies the constraints.
    """
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0  # undo and backtrack
    return None


def print_solution(grid: Matrix) -> None:
    """Pretty-print a grid, one row per line."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
35
"""GLPN model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class UpperCAmelCase_(PretrainedConfig):
    """Configuration for GLPN models.

    Stores the hyper-parameters of the hierarchical encoder (per-stage depths,
    hidden sizes, patch sizes, strides, attention heads, ...) and of the
    depth-estimation head. All arguments are stored as same-named attributes;
    unknown keyword arguments are forwarded to ``PretrainedConfig``.
    """

    # NOTE(review): the original bound "glpn" to a class attribute named
    # ``lowercase``; ``model_type`` is the attribute the config machinery
    # reads — confirm against callers.
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        # Mutable list defaults are kept as-is to preserve the public
        # signature; they are only read, never mutated, by config code.
        super().__init__(**kwargs)
        # Bug fix: the original bound every value to a throwaway local, so the
        # config instance never stored any of its hyper-parameters.
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
35
1
"""Fast (Rust-backed) tokenizer for RoFormer models."""
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}


class UpperCAmelCase_(PreTrainedTokenizerFast):
    """RoFormer fast tokenizer.

    BERT-style WordPiece tokenizer whose pre-tokenizer is swapped to a
    Jieba-based one at runtime (and back to `BertPreTokenizer` for
    pickling/saving, since the custom pre-tokenizer is not serializable).
    """

    # NOTE(review): the original assigned all five constants to a single
    # shadowed class attribute named ``lowercase``; these are the attribute
    # names the fast-tokenizer machinery reads — confirm against callers.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Re-build the backend normalizer if its lowercase/strip_accents
        # settings disagree with the arguments passed in.
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)
        self.do_lower_case = do_lower_case

    def __getstate__(self):
        # The Jieba pre-tokenizer is a custom Python object that tokenizers
        # cannot pickle; swap in a plain BertPreTokenizer for serialization.
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        # Restore the custom Jieba pre-tokenizer after unpickling.
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None):
        """Build ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]`` input ids."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a_a:
            output += token_ids_a_a + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None
    ) -> List[int]:
        """Return token type ids: 0 for the first segment, 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_a + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the vocabulary files via the backend model; return the paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        # Swap to the serializable BertPreTokenizer before saving (same reason
        # as __getstate__); the saved tokenizer.json cannot hold the custom one.
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
35
"""Fast (Rust-backed) tokenizer for RoFormer models."""
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}


class UpperCAmelCase_(PreTrainedTokenizerFast):
    """RoFormer fast tokenizer.

    BERT-style WordPiece tokenizer whose pre-tokenizer is swapped to a
    Jieba-based one at runtime (and back to `BertPreTokenizer` for
    pickling/saving, since the custom pre-tokenizer is not serializable).
    """

    # NOTE(review): the original assigned all five constants to a single
    # shadowed class attribute named ``lowercase``; these are the attribute
    # names the fast-tokenizer machinery reads — confirm against callers.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Re-build the backend normalizer if its lowercase/strip_accents
        # settings disagree with the arguments passed in.
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)
        self.do_lower_case = do_lower_case

    def __getstate__(self):
        # The Jieba pre-tokenizer is a custom Python object that tokenizers
        # cannot pickle; swap in a plain BertPreTokenizer for serialization.
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        # Restore the custom Jieba pre-tokenizer after unpickling.
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None):
        """Build ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]`` input ids."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a_a:
            output += token_ids_a_a + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None
    ) -> List[int]:
        """Return token type ids: 0 for the first segment, 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_a + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the vocabulary files via the backend model; return the paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        # Swap to the serializable BertPreTokenizer before saving (same reason
        # as __getstate__); the saved tokenizer.json cannot hold the custom one.
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
35
1
'''simple docstring'''
import argparse
import os

from transformers.utils import direct_transformers_import

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"


def _find_text_in_file(filename, start_prompt, end_prompt):
    """Return ``(text, start_index, end_index, lines)`` for the block of `filename`
    strictly between `start_prompt` and `end_prompt`, trimmed of blank lines."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    # Trim surrounding blank lines (a line of length <= 1 is just "\n").
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Map each task-guide doc file to the auto-mapping that lists its supported models.
TASK_GUIDE_TO_MODELS = {
    "asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    "audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    "language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    "image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    "masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    "multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    "object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    "question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    "semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    "sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    "summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    "translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    "document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    "monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}

# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    "summarization.md": ("nllb",),
    "translation.md": ("nllb",),
}


def get_model_list_for_task(task_guide):
    """Return the markdown-formatted list of model links supported by `task_guide`."""
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    """Verify (or, with overwrite=True, regenerate) the auto-generated model list in a task guide."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )

    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
35
'''simple docstring'''
import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def test_filelock(tmpdir):
    """A second lock on the same file must raise `Timeout` after the timeout elapses."""
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        # the failed acquire must have actually waited out the timeout
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    """Over-long lock file names must be shortened so the basename stays a valid path."""
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    # the oversized name must not survive verbatim
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
35
1
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)


class UpperCAmelCase_(BackboneConfigMixin, PretrainedConfig):
    """Configuration for the MaskFormer Swin backbone.

    Stores the Swin transformer hyper-parameters plus the backbone
    feature/index selection used by MaskFormer.
    """

    model_type = "maskformer-swin"

    # Map generic config attribute names onto the Swin-specific ones.
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
35
'''simple docstring'''


def sum_of_series(first_term: float, common_diff: float, num_of_terms: int) -> float:
    """Return the sum of an arithmetic series.

    Uses the closed form S = n/2 * (2a + (n - 1)d).

    >>> sum_of_series(1, 1, 10)
    55.0
    >>> sum_of_series(1, 10, 100)
    49600.0
    """
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main() -> None:
    """Tiny demo: sum of 1 + 2 + ... + 10."""
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
35
1
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


logger = logging.get_logger(__name__)


class UpperCAmelCase_(BeitImageProcessor):
    """Deprecated alias kept for backward compatibility; use `BeitImageProcessor`."""

    def __init__(self, *args, **kwargs) -> None:
        # Emit the deprecation warning, then defer entirely to the image processor.
        warnings.warn(
            """The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use BeitImageProcessor instead.""",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
35
'''simple docstring'''
# Parameter-name sets shared by pipeline tests: for each pipeline family, the
# full set of call arguments and the subset that may be batched.
# NOTE(review): the original bound every set to the same name, so only the last
# one was reachable; names below follow diffusers' `pipeline_params` module.

TEXT_TO_IMAGE_PARAMS = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])

IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"])

IMAGE_VARIATION_PARAMS = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_VARIATION_BATCH_PARAMS = frozenset(["image"])

TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"])

TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"])

IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["image", "mask_image"])

IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["example_image", "image", "mask_image"])

CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["class_labels"])

CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["class_labels"])

UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])

UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])

TEXT_TO_AUDIO_PARAMS = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["input_tokens"])

TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["input_tokens"])
35
1
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union

import numpy as np

from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase


def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """Pad (or truncate) every sequence in `sequences` to `sequence_length`.

    A tuple `padding_value` means each element is a pair (padded along a
    trailing axis of size 2); otherwise sequences are flat.  `padding_side`
    is "right" or "left" (matching the tokenizer's padding side).
    """
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()


def is_punctuation(char):
    """Return True if `char` is an ASCII punctuation character or has Unicode category P*."""
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False


@dataclass
class UpperCAmelCase_(DataCollatorMixin):
    """Collator for LUKE token classification: pads inputs via the tokenizer and
    pads `labels`, `ner_tags` and `original_entity_spans` to the entity length."""

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
35
'''simple docstring'''
import json
import os
import unittest

from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
    VOCAB_FILES_NAMES,
    GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class UpperCAmelCase_(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for `GPTSanJapaneseTokenizer`."""

    # Attribute names below are the contract read by `TokenizerTesterMixin`.
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()
        # fmt: off
        vocab_tokens = ["ใ“ใ‚“", "ใ“ใ‚“ใซ", "ใซใกใฏ", "ใฐใ‚“ใฏ", "ไธ–็•Œ,ใ”บ็•Œ", "ใ€", "ใ€‚", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # ๐Ÿ˜€
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ \nใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚๐Ÿ˜€"
        output_text = "ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ \nใ“ใ‚“ใฐใ‚“ใฏใ€ไธ–็•Œใ€‚๐Ÿ˜€"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ใ€€ใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚"
        expected_token = ["ใ“ใ‚“", "ใซใกใฏ", "ใ€", "ไธ–็•Œ", "ใ€‚", "<SP>", "ใ“ใ‚“", "ใฐใ‚“ใฏ", "ใ€", "ใ”บ็•Œ", "ใ€‚"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "ใ“ใ‚“ใซใกใฏใ€<|bagoftoken|>ไธ–็•Œใ€‚ใ“ใ‚“ใฐใ‚“ใฏใ€<|bagoftoken|>ใ”บ็•Œใ€‚"
        expected_text = "ใ“ใ‚“ใซใกใฏใ€ใ€ใ€ใ€ไธ–็•Œใ€‚ใ“ใ‚“ใฐใ‚“ใฏใ€ใ€ใ€ใ€ไธ–็•Œใ€‚"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)

    @slow
    def test_prefix_input_pattern(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚"
        input_text = "ใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚๐Ÿ˜€"
        expected_text = "ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ใ“ใ‚“ใฐใ‚“ใฏใ€ไธ–็•Œใ€‚๐Ÿ˜€"
        tokens_1 = tokenizer.encode(prefix_text + input_text)
        tokens_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        text_1 = tokenizer.decode(tokens_1)
        text_2 = tokenizer.decode(tokens_2)
        text_3 = tokenizer.decode(tokens_3)
        self.assertEqual(text_1, expected_text)
        self.assertEqual(text_2, expected_text)
        self.assertEqual(text_3, expected_text)

    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚"
        input_text = "ใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚๐Ÿ˜€"
        # -2 strips the special tokens added around the encoded text.
        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_ids_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_ids_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_ids_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_ids_1, expected_mask_1)
        self.assertListEqual(type_ids_2, expected_mask_2)
        self.assertListEqual(type_ids_3, expected_mask_3)

    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        x_token_1 = tokenizer.encode("ใ‚ใƒณใ„ใƒฏ")
        x_token_2 = tokenizer.encode("", prefix_text="ใ‚ใƒณใ„ใƒฏ")
        x_token_3 = tokenizer.encode("ใ„ใƒฏ", prefix_text="ใ‚ใƒณ")
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_2[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_2[1], x_token_3[3])  # SEG token

    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        input_pairs = [["ๆญฆ็”ฐไฟก็Ž„", "ใฏใ€"], ["็น”็”ฐไฟก้•ท", "ใฎ้…ไธ‹ใฎใ€"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)
        # fmt: off
        expected_outputs = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)

    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
1
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazy import structure: submodule name -> public symbols it provides.
_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
35
'''simple docstring'''
from transformers import BertTokenizerFast

from .custom_tokenization import CustomTokenizer


class UpperCAmelCase_(BertTokenizerFast):
    """Fast tokenizer whose matching slow implementation is `CustomTokenizer`."""

    # `slow_tokenizer_class` is the attribute name the transformers fast-tokenizer
    # machinery reads when converting to/from the slow tokenizer.
    slow_tokenizer_class = CustomTokenizer
    pass
35
1
'''simple docstring'''


def valid_coloring(neighbours, colored_vertices, color) -> bool:
    """Return True if no already-colored neighbour (adjacency row entry == 1)
    of the current vertex uses `color`."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(neighbours)
    )


def util_color(graph, max_colors, colored_vertices, index) -> bool:
    """Backtracking helper: try to color vertices from `index` onward.

    Mutates `colored_vertices` in place; returns True once every vertex has a
    valid color, False if no assignment works with `max_colors` colors.
    """
    # Base Case: all vertices colored.
    if index == len(graph):
        return True

    # Recursive Step: try every color for the current vertex.
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring of the rest of the graph
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph, max_colors) -> list[int]:
    """Return a valid vertex coloring of `graph` (adjacency matrix) using at
    most `max_colors` colors, or an empty list if none exists."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
35
'''simple docstring'''
import numpy as np

from transformers import Pipeline


def softmax(outputs):
    """Numerically stable softmax over the last axis."""
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class UpperCAmelCase_(Pipeline):
    """Custom pipeline classifying a pair of texts; implements the four
    `Pipeline` hooks (`_sanitize_parameters`, `preprocess`, `_forward`, `postprocess`)."""

    def _sanitize_parameters(self, **kwargs):
        # Only `second_text` is forwarded, and only to `preprocess`.
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)
        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
35
1
'''simple docstring'''
from math import factorial


def solution(n: int = 20) -> int:
    """Return the number of lattice paths through an n x n grid (Project Euler 15),
    i.e. the central binomial coefficient C(2n, n).

    >>> solution(1)
    2
    >>> solution(20)
    137846528820
    """
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    # Integer floor division keeps the arithmetic exact; float division would
    # lose precision / overflow for large n.
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
35
'''simple docstring'''


# Function to print upper half of diamond (pyramid)
def floyd(n) -> None:
    """Print the upper half of a star diamond with `n` rows."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n) -> None:
    """Print the lower half of a star diamond with `n` rows."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n) -> None:
    """Print a full diamond of `n` rows; complain for non-positive `n`."""
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(R"| /\ | |- | |- |--| |\ /| |-")
    print(R"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))

    print("Good Bye...")
35
1
'''simple docstring'''
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch


TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class UpperCAmelCase_(unittest.TestCase):
    """Unit tests for `create_student_by_copying_alternating_layers`."""

    @cached_property
    def teacher_config(self):
        # Teacher config used by the BART-based assertions below.
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        # d=None: decoder depth is copied from the teacher; just check it builds.
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
35
'''simple docstring'''


def solution(n: int = 1_000) -> int:
    """Return the sum of all multiples of 3 or 5 below `n` (Project Euler 1).

    Starting the range at 3 is safe: 0, 1 and 2 contribute nothing.

    >>> solution(10)
    23
    >>> solution()
    233168
    """
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
35
1
"""A generic least-recently-used (LRU) cache.

A dictionary gives O(1) key lookup while a doubly linked list (with sentinel
head/rear nodes) keeps entries ordered from least- to most-recently used, so
both eviction and recency promotion are O(1).
"""
from __future__ import annotations

from collections.abc import Callable
from typing import Generic, TypeVar

T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    """A single key/value node of the doubly linked list."""

    def __init__(self, key: T | None, val: U | None) -> None:
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    """Doubly linked list with sentinel head and rear nodes.

    Real entries always live strictly between the two sentinels, which removes
    all edge cases from ``add`` and ``remove``.
    """

    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Insert ``node`` just before the rear sentinel (most-recent slot)."""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have a non-None prev.
        assert previous is not None
        previous.next = node
        node.prev = previous
        node.next = self.rear
        self.rear.prev = node

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """Unlink ``node`` and return it, or return None if it is detached."""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    """LRU cache combining a dict (lookup) with a linked list (recency order)."""

    # One shared cache instance per function decorated via `LRUCache.decorator`.
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int) -> None:
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        """Return the cached value for ``key`` (promoting it to most recent), or None.

        Note: a pythonic interface would raise KeyError; this one returns None
        on a miss so the decorator can distinguish "not cached".
        """
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        """Insert or update ``key`` -> ``value``, evicting the LRU entry if full."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # Evict the least-recently-used node (right after the head sentinel).
                first_node = self.list.head.next
                assert first_node is not None
                assert first_node.key is not None
                assert self.list.remove(first_node) is not None  # node guaranteed in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # Refresh recency and overwrite the stored value.
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        """Decorator factory memoizing a one-argument function in an LRUCache."""

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    # A cached None is indistinguishable from a miss; such
                    # functions are simply recomputed each call.
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner


if __name__ == "__main__":
    import doctest

    doctest.testmod()
35
"""Lazy import structure for the BLOOM model, mirroring other model inits."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Map of submodule name -> public names it exports; consumed by _LazyModule so
# heavy dependencies (tokenizers, torch) are only imported on first attribute access.
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module is used.
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )
else:
    import sys

    # Replace this module in sys.modules with a lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
35
1
"""Deprecated feature-extractor alias kept for backward compatibility."""
import warnings

from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor


logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    """Deprecated: use `VideoMAEImageProcessor` instead. Behavior is identical."""

    def __init__(self, *args, **kwargs) -> None:
        # Warn on every instantiation; all real work happens in the parent class.
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
35
"""Change the brightness of a PIL image by a fixed level."""
from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from PIL.Image import Image


def change_brightness(img: "Image", level: float) -> "Image":
    """Return a copy of ``img`` with every channel value shifted by ``level``.

    Raises:
        ValueError: if ``level`` is outside [-255.0, 255.0].
    """
    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")

    def brightness(c: int) -> float:
        # Equivalent to c + level; PIL clamps out-of-range values on write.
        return 128 + level + (c - 128)

    return img.point(brightness)


if __name__ == "__main__":
    # Import locally so the module itself has no hard PIL dependency at import time.
    from PIL import Image

    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
35
1
"""Fixed-size circular (ring buffer) FIFO queue."""


class CircularQueue:
    """Circular queue over a fixed-length list; indices wrap modulo capacity."""

    def __init__(self, n: int) -> None:
        self.n = n  # fixed capacity of the ring buffer
        self.array = [None] * self.n  # backing storage
        self.front = 0  # index of the first (oldest) element
        self.rear = 0  # index where the next element will be written
        self.size = 0  # number of stored elements

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        """Return True when the queue holds no elements."""
        return self.size == 0

    def first(self):
        """Return the oldest element without removing it, or False when empty."""
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        """Append ``data`` at the rear and return self (chainable).

        Raises:
            Exception: when the queue is already at capacity.
        """
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        """Pop and return the oldest element.

        Raises:
            Exception: when the queue is empty.
        """
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None  # drop the reference so it can be collected
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
35
"""Sort the entries of `_import_structure` blocks in Transformers init files.

Run with `--check_only` to report files that would change, or without it to
rewrite them in place (as `make style` does).
"""
import argparse
import os
import re


PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line: str) -> str:
    """Return the leading whitespace of `line` (empty string for blank lines)."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into chunks delimited by lines at exactly `indent_level`.

    If `start_prompt`/`end_prompt` are given, everything before/after them is
    kept as a single untouched chunk so only the region in between is sorted.
    """
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the code).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            # A more-indented previous line means the current line closes a block.
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wrap a key function so sorting ignores case and underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort objects: constants first, then classes, then functions/variables."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement: str) -> str:
    """Return `import_statement` with the names inside its brackets sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Internal imports on several lines (one per name); keep the bracket
        # lines on each side and sort the middle lines by their key.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Internal imports on one separate line.
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line.
        return _re_bracket_content.sub(_replace, import_statement)


def sort_imports(file, check_only=True):
    """Sort the `_import_structure` of one init file; return True if it would change."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0.
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks.
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend.
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # Reorder the blocks, leaving empty lines/comments where they were.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                reordered_blocks.append(sort_objects_in_import(internal_blocks[sorted_indices[count]]))
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every `__init__.py` under the Transformers tree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                # Collect every failing file (the original overwrote the list,
                # reporting only the last one found).
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
35
1
"""Unit tests for `DPMSolverSDEScheduler` (requires the torchsde package)."""
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Default scheduler kwargs; individual tests override via `kwargs`."""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        # Reference values differ per backend because of RNG/kernel differences.
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        # Timesteps are placed directly on the target device here.
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
35
"""Lazy import structure for the TimeSformer model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Map of submodule name -> public names it exports; consumed by _LazyModule so
# torch is only imported on first attribute access.
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module is used.
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )
else:
    import sys

    # Replace this module in sys.modules with a lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
35
1
"""Check whether a word is an isogram (no repeated letters, case-insensitive)."""


def is_isogram(string: str) -> bool:
    """Return True when no letter occurs twice in ``string`` (case-insensitive).

    Raises:
        ValueError: if ``string`` contains any non-alphabetic character.
    """
    if not all(ch.isalpha() for ch in string):
        raise ValueError("String must only contain alphabetic characters.")

    # Comparing lengths after deduplication detects repeats without sorting.
    lowered = string.lower()
    return len(lowered) == len(set(lowered))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()

    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
35
"""Convert a fairseq Wav2Vec2 encoder + Speech2Text2 decoder checkpoint into a
Hugging Face `SpeechEncoderDecoderModel`."""
import argparse
import json
import os

import fairseq
import torch
from torch import nn

from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq parameter-name fragment -> HF module path ("*" stands for the layer index).
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the attribute of `hf_pointer` addressed by dotted `key`."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    """Load every fairseq encoder weight into `hf_model`.

    Returns the fairseq encoder->decoder projection layer (present when the
    encoder dim differs from the decoder dim), or None.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq feature-extractor conv/norm parameter into the HF module."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def make_linear_from_emb(emb):
    """Build a bias-free linear layer that shares the embedding matrix (weight tying)."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    """Build a token->id map from a fairseq dict file, reserving the 4 special ids."""
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
    words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict


@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """Copy fairseq weights into HF modules and save model/tokenizer/feature extractor."""
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(
        model.decoder.state_dict(), strict=False
    )

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-large-lv60",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/s2t-small-mustc-en-fr-st",
        type=str,
        help="Path to hf decoder s2t checkpoint config",
    )
    parser.add_argument("--vocab_size", default=10224, type=int, help="Vocab size of decoder")
    parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")

    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
35
1
'''simple docstring''' import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging __a = logging.get_logger(__name__) def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Any: snake_case__ : Tuple = set() snake_case__ : int = [] def parse_line(_lowerCAmelCase ): for line in fp: if isinstance(_lowerCAmelCase , _lowerCAmelCase ): snake_case__ : Tuple = line.decode("""UTF-8""" ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(""" """ ): # process a single warning and move it to `selected_warnings`. if len(_lowerCAmelCase ) > 0: snake_case__ : List[Any] = """\n""".join(_lowerCAmelCase ) # Only keep the warnings specified in `targets` if any(f": {x}: " in warning for x in targets ): selected_warnings.add(_lowerCAmelCase ) buffer.clear() continue else: snake_case__ : int = line.strip() buffer.append(_lowerCAmelCase ) if from_gh: for filename in os.listdir(_lowerCAmelCase ): snake_case__ : int = os.path.join(_lowerCAmelCase , _lowerCAmelCase ) if not os.path.isdir(_lowerCAmelCase ): # read the file if filename != "warnings.txt": continue with open(_lowerCAmelCase ) as fp: parse_line(_lowerCAmelCase ) else: try: with zipfile.ZipFile(_lowerCAmelCase ) as z: for filename in z.namelist(): if not os.path.isdir(_lowerCAmelCase ): # read the file if filename != "warnings.txt": continue with z.open(_lowerCAmelCase ) as fp: parse_line(_lowerCAmelCase ) except Exception: logger.warning( f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped." 
) return selected_warnings def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: snake_case__ : int = set() snake_case__ : Dict = [os.path.join(_lowerCAmelCase , _lowerCAmelCase ) for p in os.listdir(_lowerCAmelCase ) if (p.endswith(""".zip""" ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(_lowerCAmelCase , _lowerCAmelCase ) ) return selected_warnings if __name__ == "__main__": def __snake_case( _lowerCAmelCase ) -> Tuple: return values.split(""",""" ) __a = argparse.ArgumentParser() # Required parameters parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.") parser.add_argument( "--output_dir", type=str, required=True, help="Where to store the downloaded artifacts and other result files.", ) parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.") # optional parameters parser.add_argument( "--targets", default="DeprecationWarning,UserWarning,FutureWarning", type=list_str, help="Comma-separated list of target warning(s) which we want to extract.", ) parser.add_argument( "--from_gh", action="store_true", help="If running from a GitHub action workflow and collecting warnings from its artifacts.", ) __a = parser.parse_args() __a = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links __a = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print("=" * 80) download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts __a = extract_warnings(args.output_dir, args.targets) __a = 
sorted(selected_warnings) with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
35
'''simple docstring''' import importlib import os import sys # This is required to make the module import works (when the python process is running from the root of the repo) sys.path.append(".") def __snake_case( _lowerCAmelCase ) -> int: snake_case__ : Optional[int] = test_file.split(os.path.sep ) if components[0:2] != ["tests", "models"]: raise ValueError( """`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got """ f"{test_file} instead." ) snake_case__ : Dict = components[-1] if not test_fn.endswith("""py""" ): raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead." ) if not test_fn.startswith("""test_modeling_""" ): raise ValueError( f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead." ) snake_case__ : int = components[:-1] + [test_fn.replace(""".py""" , """""" )] snake_case__ : int = """.""".join(_lowerCAmelCase ) return test_module_path def __snake_case( _lowerCAmelCase ) -> List[str]: snake_case__ : str = get_module_path(_lowerCAmelCase ) snake_case__ : Union[str, Any] = importlib.import_module(_lowerCAmelCase ) return test_module def __snake_case( _lowerCAmelCase ) -> int: snake_case__ : List[Any] = [] snake_case__ : Optional[int] = get_test_module(_lowerCAmelCase ) for attr in dir(_lowerCAmelCase ): if attr.endswith("""ModelTester""" ): tester_classes.append(getattr(_lowerCAmelCase , _lowerCAmelCase ) ) # sort with class names return sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x.__name__ ) def __snake_case( _lowerCAmelCase ) -> Dict: snake_case__ : List[str] = [] snake_case__ : Any = get_test_module(_lowerCAmelCase ) for attr in dir(_lowerCAmelCase ): snake_case__ : Dict = getattr(_lowerCAmelCase , _lowerCAmelCase ) # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking # `all_model_classes` is not empty (which also excludes other special classes). 
snake_case__ : List[str] = getattr(_lowerCAmelCase , """all_model_classes""" , [] ) if len(_lowerCAmelCase ) > 0: test_classes.append(_lowerCAmelCase ) # sort with class names return sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x.__name__ ) def __snake_case( _lowerCAmelCase ) -> Dict: snake_case__ : Any = get_test_classes(_lowerCAmelCase ) snake_case__ : Optional[Any] = set() for test_class in test_classes: model_classes.update(test_class.all_model_classes ) # sort with class names return sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x.__name__ ) def __snake_case( _lowerCAmelCase ) -> Optional[Any]: snake_case__ : Optional[int] = test_class() if hasattr(_lowerCAmelCase , """setUp""" ): test.setUp() snake_case__ : Any = None if hasattr(_lowerCAmelCase , """model_tester""" ): # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case. if test.model_tester is not None: snake_case__ : Tuple = test.model_tester.__class__ return model_tester def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Dict: snake_case__ : Union[str, Any] = get_test_classes(_lowerCAmelCase ) snake_case__ : str = [] for test_class in test_classes: if model_class in test_class.all_model_classes: target_test_classes.append(_lowerCAmelCase ) # sort with class names return sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x.__name__ ) def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Tuple: snake_case__ : Optional[Any] = get_test_classes_for_model(_lowerCAmelCase , _lowerCAmelCase ) snake_case__ : Union[str, Any] = [] for test_class in test_classes: snake_case__ : Tuple = get_model_tester_from_test_class(_lowerCAmelCase ) if tester_class is not None: tester_classes.append(_lowerCAmelCase ) # sort with class names return sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x.__name__ ) def __snake_case( _lowerCAmelCase ) -> Union[str, Any]: snake_case__ : Optional[Any] = get_test_classes(_lowerCAmelCase ) snake_case__ : Union[str, Any] = 
{test_class: get_model_tester_from_test_class(_lowerCAmelCase ) for test_class in test_classes} return test_tester_mapping def __snake_case( _lowerCAmelCase ) -> int: snake_case__ : Any = get_model_classes(_lowerCAmelCase ) snake_case__ : Any = { model_class: get_test_classes_for_model(_lowerCAmelCase , _lowerCAmelCase ) for model_class in model_classes } return model_test_mapping def __snake_case( _lowerCAmelCase ) -> Optional[int]: snake_case__ : Union[str, Any] = get_model_classes(_lowerCAmelCase ) snake_case__ : str = { model_class: get_tester_classes_for_model(_lowerCAmelCase , _lowerCAmelCase ) for model_class in model_classes } return model_to_tester_mapping def __snake_case( _lowerCAmelCase ) -> int: if isinstance(_lowerCAmelCase , _lowerCAmelCase ): return o elif isinstance(_lowerCAmelCase , _lowerCAmelCase ): return o.__name__ elif isinstance(_lowerCAmelCase , (list, tuple) ): return [to_json(_lowerCAmelCase ) for x in o] elif isinstance(_lowerCAmelCase , _lowerCAmelCase ): return {to_json(_lowerCAmelCase ): to_json(_lowerCAmelCase ) for k, v in o.items()} else: return o
35
1
'''simple docstring''' import logging from dataclasses import dataclass, field from pathlib import Path from typing import Optional, Union from .generation.configuration_utils import GenerationConfig from .training_args import TrainingArguments from .utils import add_start_docstrings __a = logging.getLogger(__name__) @dataclass @add_start_docstrings(TrainingArguments.__doc__ ) class UpperCAmelCase_ ( _a ): """simple docstring""" lowercase = field(default=_a , metadata={"help": "Whether to use SortishSampler or not."} ) lowercase = field( default=_a , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} ) lowercase = field( default=_a , metadata={ "help": ( "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default " "to the `max_length` value of the model configuration." ) } , ) lowercase = field( default=_a , metadata={ "help": ( "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default " "to the `num_beams` value of the model configuration." ) } , ) lowercase = field( default=_a , metadata={ "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction." } , ) def lowerCamelCase ( self : List[str] ): snake_case__ : int = super().to_dict() for k, v in d.items(): if isinstance(snake_case_ , snake_case_ ): snake_case__ : Optional[int] = v.to_dict() return d
35
'''simple docstring''' import argparse import json import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification def __snake_case( _lowerCAmelCase ) -> List[Any]: snake_case__ : Dict = SwinConfig() snake_case__ : Optional[Any] = swin_name.split("""_""" ) snake_case__ : Any = name_split[1] snake_case__ : List[Any] = int(name_split[4] ) snake_case__ : int = int(name_split[3][-1] ) if model_size == "tiny": snake_case__ : List[Any] = 96 snake_case__ : int = (2, 2, 6, 2) snake_case__ : int = (3, 6, 12, 24) elif model_size == "small": snake_case__ : Union[str, Any] = 96 snake_case__ : Optional[Any] = (2, 2, 18, 2) snake_case__ : str = (3, 6, 12, 24) elif model_size == "base": snake_case__ : Dict = 128 snake_case__ : str = (2, 2, 18, 2) snake_case__ : Dict = (4, 8, 16, 32) else: snake_case__ : List[str] = 192 snake_case__ : str = (2, 2, 18, 2) snake_case__ : List[Any] = (6, 12, 24, 48) if "in22k" in swin_name: snake_case__ : str = 21_841 else: snake_case__ : List[str] = 1_000 snake_case__ : int = """huggingface/label-files""" snake_case__ : Any = """imagenet-1k-id2label.json""" snake_case__ : List[Any] = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) ) snake_case__ : Dict = {int(_lowerCAmelCase ): v for k, v in idalabel.items()} snake_case__ : Optional[int] = idalabel snake_case__ : List[Any] = {v: k for k, v in idalabel.items()} snake_case__ : List[Any] = img_size snake_case__ : Dict = num_classes snake_case__ : Dict = embed_dim snake_case__ : Optional[int] = depths snake_case__ : int = num_heads snake_case__ : Optional[int] = window_size return config def __snake_case( _lowerCAmelCase ) -> Dict: if "patch_embed.proj" in name: snake_case__ : List[str] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: snake_case__ : int = 
name.replace("""patch_embed.norm""" , """embeddings.norm""" ) if "layers" in name: snake_case__ : str = """encoder.""" + name if "attn.proj" in name: snake_case__ : List[str] = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: snake_case__ : Tuple = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: snake_case__ : List[str] = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: snake_case__ : Optional[Any] = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: snake_case__ : Union[str, Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: snake_case__ : Dict = name.replace("""mlp.fc2""" , """output.dense""" ) if name == "norm.weight": snake_case__ : Tuple = """layernorm.weight""" if name == "norm.bias": snake_case__ : Union[str, Any] = """layernorm.bias""" if "head" in name: snake_case__ : Optional[int] = name.replace("""head""" , """classifier""" ) else: snake_case__ : List[str] = """swin.""" + name return name def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: for key in orig_state_dict.copy().keys(): snake_case__ : Optional[int] = orig_state_dict.pop(_lowerCAmelCase ) if "mask" in key: continue elif "qkv" in key: snake_case__ : Dict = key.split(""".""" ) snake_case__ : Optional[int] = int(key_split[1] ) snake_case__ : Union[str, Any] = int(key_split[3] ) snake_case__ : List[Any] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: snake_case__ : Optional[Any] = val[:dim, :] snake_case__ : Tuple = val[ dim : dim * 2, : ] snake_case__ : Dict = val[-dim:, :] else: snake_case__ : Tuple = val[ :dim ] snake_case__ : int = val[ dim : dim * 2 ] snake_case__ : int = val[ -dim: ] else: snake_case__ : Union[str, Any] = val return orig_state_dict def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> int: snake_case__ : Optional[int] = timm.create_model(_lowerCAmelCase , 
pretrained=_lowerCAmelCase ) timm_model.eval() snake_case__ : Optional[int] = get_swin_config(_lowerCAmelCase ) snake_case__ : Optional[Any] = SwinForImageClassification(_lowerCAmelCase ) model.eval() snake_case__ : str = convert_state_dict(timm_model.state_dict() , _lowerCAmelCase ) model.load_state_dict(_lowerCAmelCase ) snake_case__ : int = """http://images.cocodataset.org/val2017/000000039769.jpg""" snake_case__ : Dict = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swin_name.replace("""_""" , """-""" ) ) ) snake_case__ : Dict = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw ) snake_case__ : Optional[int] = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ) snake_case__ : Optional[Any] = timm_model(inputs["""pixel_values"""] ) snake_case__ : str = model(**_lowerCAmelCase ).logits assert torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 ) print(f"Saving model {swin_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_lowerCAmelCase ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( "--swin_name", default="swin_tiny_patch4_window7_224", type=str, help="Name of the Swin timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) __a = parser.parse_args() convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
35
1
'''simple docstring''' from collections.abc import Sequence def __snake_case( _lowerCAmelCase , _lowerCAmelCase = False ) -> float: if not arr: return 0 snake_case__ : Optional[Any] = 0 if allow_empty_subarrays else float("""-inf""" ) snake_case__ : List[str] = 0.0 for num in arr: snake_case__ : Any = max(0 if allow_empty_subarrays else num , curr_sum + num ) snake_case__ : Optional[Any] = max(_lowerCAmelCase , _lowerCAmelCase ) return max_sum if __name__ == "__main__": from doctest import testmod testmod() __a = [-2, 1, -3, 4, -1, 2, 1, -5, 4] print(F"{max_subarray_sum(nums) = }")
35
'''simple docstring''' import warnings from ...utils import logging from .image_processing_beit import BeitImageProcessor __a = logging.get_logger(__name__) class UpperCAmelCase_ ( _a ): """simple docstring""" def __init__( self : List[str] , *snake_case_ : str , **snake_case_ : List[str] ): warnings.warn( """The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use BeitImageProcessor instead.""" , snake_case_ , ) super().__init__(*snake_case_ , **snake_case_ )
35
1
'''simple docstring''' import os import pytest from attr import dataclass __a = "us-east-1" # defaults region @dataclass class UpperCAmelCase_ : """simple docstring""" lowercase = 42 lowercase = "arn:aws:iam::558105141721:role/sagemaker_execution_role" lowercase = { "task_name": "mnli", "per_device_train_batch_size": 16, "per_device_eval_batch_size": 16, "do_train": True, "do_eval": True, "do_predict": True, "output_dir": "/opt/ml/model", "overwrite_output_dir": True, "max_steps": 5_00, "save_steps": 55_00, } lowercase = {**hyperparameters, "max_steps": 10_00} @property def lowerCamelCase ( self : List[str] ): if self.framework == "pytorch": return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}, {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"}, ] else: return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"}, {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"}, ] @property def lowerCamelCase ( self : Union[str, Any] ): return f"{self.framework}-transfromers-test" @property def lowerCamelCase ( self : List[str] ): return f"./tests/sagemaker/scripts/{self.framework}" @property def lowerCamelCase ( self : List[str] ): if self.framework == "pytorch": return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04" else: return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04" @pytest.fixture(scope="""class""" ) def __snake_case( _lowerCAmelCase ) -> Union[str, Any]: snake_case__ : Dict = SageMakerTestEnvironment(framework=request.cls.framework )
35
'''simple docstring''' import logging from dataclasses import dataclass, field from pathlib import Path from typing import Optional, Union from .generation.configuration_utils import GenerationConfig from .training_args import TrainingArguments from .utils import add_start_docstrings __a = logging.getLogger(__name__) @dataclass @add_start_docstrings(TrainingArguments.__doc__ ) class UpperCAmelCase_ ( _a ): """simple docstring""" lowercase = field(default=_a , metadata={"help": "Whether to use SortishSampler or not."} ) lowercase = field( default=_a , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} ) lowercase = field( default=_a , metadata={ "help": ( "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default " "to the `max_length` value of the model configuration." ) } , ) lowercase = field( default=_a , metadata={ "help": ( "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default " "to the `num_beams` value of the model configuration." ) } , ) lowercase = field( default=_a , metadata={ "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction." } , ) def lowerCamelCase ( self : List[str] ): snake_case__ : int = super().to_dict() for k, v in d.items(): if isinstance(snake_case_ , snake_case_ ): snake_case__ : Optional[int] = v.to_dict() return d
35
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) __a = { "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"], "processing_layoutlmv2": ["LayoutLMv2Processor"], "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["LayoutLMv2TokenizerFast"] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["LayoutLMv2FeatureExtractor"] __a = ["LayoutLMv2ImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST", "LayoutLMv2ForQuestionAnswering", "LayoutLMv2ForSequenceClassification", "LayoutLMv2ForTokenClassification", "LayoutLMv2Layer", "LayoutLMv2Model", "LayoutLMv2PreTrainedModel", ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, 
LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
35
'''simple docstring''' import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() __a = logging.get_logger(__name__) def __snake_case( _lowerCAmelCase , _lowerCAmelCase=False ) -> str: snake_case__ : Union[str, Any] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias") ) rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias") ) rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias") ) # projection layer + position embeddings rename_keys.extend( [ ("""cls_token""", """deit.embeddings.cls_token"""), ("""dist_token""", """deit.embeddings.distillation_token"""), ("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""), ("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""), ("""pos_embed""", 
"""deit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ("""pre_logits.fc.weight""", """pooler.dense.weight"""), ("""pre_logits.fc.bias""", """pooler.dense.bias"""), ] ) # if just the base model, we should remove "deit" from all keys that start with "deit" snake_case__ : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ("""norm.weight""", """deit.layernorm.weight"""), ("""norm.bias""", """deit.layernorm.bias"""), ("""head.weight""", """cls_classifier.weight"""), ("""head.bias""", """cls_classifier.bias"""), ("""head_dist.weight""", """distillation_classifier.weight"""), ("""head_dist.bias""", """distillation_classifier.bias"""), ] ) return rename_keys def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> Union[str, Any]: for i in range(config.num_hidden_layers ): if base_model: snake_case__ : Tuple = """""" else: snake_case__ : Dict = """deit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) snake_case__ : Optional[Any] = state_dict.pop(f"blocks.{i}.attn.qkv.weight" ) snake_case__ : Tuple = state_dict.pop(f"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict snake_case__ : Any = in_proj_weight[ : config.hidden_size, : ] snake_case__ : Optional[int] = in_proj_bias[: config.hidden_size] snake_case__ : Any = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] snake_case__ : str = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] snake_case__ : List[str] = in_proj_weight[ -config.hidden_size :, : ] snake_case__ : Tuple = in_proj_bias[-config.hidden_size :] def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int: snake_case__ : str = dct.pop(_lowerCAmelCase 
) snake_case__ : Tuple = val def __snake_case( ) -> Tuple: snake_case__ : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" snake_case__ : Optional[int] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw ) return im @torch.no_grad() def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> str: snake_case__ : Optional[int] = DeiTConfig() # all deit models have fine-tuned heads snake_case__ : Union[str, Any] = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size snake_case__ : int = 1_000 snake_case__ : Any = """huggingface/label-files""" snake_case__ : Optional[Any] = """imagenet-1k-id2label.json""" snake_case__ : Tuple = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) ) snake_case__ : List[Any] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()} snake_case__ : List[Any] = idalabel snake_case__ : List[str] = {v: k for k, v in idalabel.items()} snake_case__ : Tuple = int(deit_name[-6:-4] ) snake_case__ : Optional[Any] = int(deit_name[-3:] ) # size of the architecture if deit_name[9:].startswith("""tiny""" ): snake_case__ : Tuple = 192 snake_case__ : Union[str, Any] = 768 snake_case__ : Tuple = 12 snake_case__ : Union[str, Any] = 3 elif deit_name[9:].startswith("""small""" ): snake_case__ : str = 384 snake_case__ : Any = 1_536 snake_case__ : str = 12 snake_case__ : int = 6 if deit_name[9:].startswith("""base""" ): pass elif deit_name[4:].startswith("""large""" ): snake_case__ : Union[str, Any] = 1_024 snake_case__ : Any = 4_096 snake_case__ : List[Any] = 24 snake_case__ : Tuple = 16 # load original model from timm snake_case__ : List[Any] = timm.create_model(_lowerCAmelCase , pretrained=_lowerCAmelCase ) timm_model.eval() # load state_dict of original model, remove and rename some keys snake_case__ : Optional[Any] = timm_model.state_dict() snake_case__ : Optional[int] = create_rename_keys(_lowerCAmelCase , _lowerCAmelCase ) for src, 
dest in rename_keys: rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # load HuggingFace model snake_case__ : Optional[Any] = DeiTForImageClassificationWithTeacher(_lowerCAmelCase ).eval() model.load_state_dict(_lowerCAmelCase ) # Check outputs on an image, prepared by DeiTImageProcessor snake_case__ : List[Any] = int( (256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 snake_case__ : Optional[Any] = DeiTImageProcessor(size=_lowerCAmelCase , crop_size=config.image_size ) snake_case__ : str = image_processor(images=prepare_img() , return_tensors="""pt""" ) snake_case__ : Optional[Any] = encoding["""pixel_values"""] snake_case__ : Tuple = model(_lowerCAmelCase ) snake_case__ : Optional[int] = timm_model(_lowerCAmelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_lowerCAmelCase , outputs.logits , atol=1e-3 ) Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase ) print(f"Saving model {deit_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_lowerCAmelCase ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( "--deit_name", default="vit_deit_base_distilled_patch16_224", type=str, help="Name of the DeiT timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) __a = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
35
1
'''simple docstring''' import copy import os from typing import TYPE_CHECKING, List, Union if TYPE_CHECKING: pass from ...configuration_utils import PretrainedConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json", } class UpperCAmelCase_ ( _a ): """simple docstring""" lowercase = "align_text_model" def __init__( self : Dict , snake_case_ : Any=30_522 , snake_case_ : Tuple=768 , snake_case_ : Any=12 , snake_case_ : Union[str, Any]=12 , snake_case_ : List[Any]=3_072 , snake_case_ : Dict="gelu" , snake_case_ : Optional[int]=0.1 , snake_case_ : str=0.1 , snake_case_ : List[str]=512 , snake_case_ : str=2 , snake_case_ : Optional[Any]=0.02 , snake_case_ : Optional[Any]=1E-1_2 , snake_case_ : List[Any]=0 , snake_case_ : Union[str, Any]="absolute" , snake_case_ : List[Any]=True , **snake_case_ : List[Any] , ): super().__init__(**snake_case_ ) snake_case__ : str = vocab_size snake_case__ : str = hidden_size snake_case__ : Optional[Any] = num_hidden_layers snake_case__ : Union[str, Any] = num_attention_heads snake_case__ : Tuple = hidden_act snake_case__ : int = intermediate_size snake_case__ : Tuple = hidden_dropout_prob snake_case__ : Dict = attention_probs_dropout_prob snake_case__ : Tuple = max_position_embeddings snake_case__ : Tuple = type_vocab_size snake_case__ : Dict = initializer_range snake_case__ : str = layer_norm_eps snake_case__ : str = position_embedding_type snake_case__ : Dict = use_cache snake_case__ : Tuple = pad_token_id @classmethod def lowerCamelCase ( cls : Union[str, Any] , snake_case_ : Union[str, os.PathLike] , **snake_case_ : Optional[int] ): cls._set_token_in_kwargs(snake_case_ ) snake_case__ , snake_case__ : List[str] = cls.get_config_dict(snake_case_ , **snake_case_ ) # get the text config dict if we are loading from AlignConfig if config_dict.get("""model_type""" ) == "align": snake_case__ : Optional[Any] = 
config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(snake_case_ , **snake_case_ ) class UpperCAmelCase_ ( _a ): """simple docstring""" lowercase = "align_vision_model" def __init__( self : Union[str, Any] , snake_case_ : int = 3 , snake_case_ : int = 600 , snake_case_ : float = 2.0 , snake_case_ : float = 3.1 , snake_case_ : int = 8 , snake_case_ : List[int] = [3, 3, 5, 3, 5, 5, 3] , snake_case_ : List[int] = [32, 16, 24, 40, 80, 112, 192] , snake_case_ : List[int] = [16, 24, 40, 80, 112, 192, 320] , snake_case_ : List[int] = [] , snake_case_ : List[int] = [1, 2, 2, 2, 1, 2, 1] , snake_case_ : List[int] = [1, 2, 2, 3, 3, 4, 1] , snake_case_ : List[int] = [1, 6, 6, 6, 6, 6, 6] , snake_case_ : float = 0.25 , snake_case_ : str = "swish" , snake_case_ : int = 2_560 , snake_case_ : str = "mean" , snake_case_ : float = 0.02 , snake_case_ : float = 0.001 , snake_case_ : float = 0.99 , snake_case_ : float = 0.2 , **snake_case_ : Optional[int] , ): super().__init__(**snake_case_ ) snake_case__ : int = num_channels snake_case__ : Optional[int] = image_size snake_case__ : int = width_coefficient snake_case__ : str = depth_coefficient snake_case__ : Optional[int] = depth_divisor snake_case__ : Any = kernel_sizes snake_case__ : Optional[int] = in_channels snake_case__ : Optional[Any] = out_channels snake_case__ : Tuple = depthwise_padding snake_case__ : Tuple = strides snake_case__ : str = num_block_repeats snake_case__ : Any = expand_ratios snake_case__ : Any = squeeze_expansion_ratio snake_case__ : Tuple = hidden_act snake_case__ : int = hidden_dim snake_case__ : int = pooling_type snake_case__ : List[Any] = initializer_range snake_case__ : Optional[Any] = 
batch_norm_eps snake_case__ : Optional[int] = batch_norm_momentum snake_case__ : int = drop_connect_rate snake_case__ : int = sum(snake_case_ ) * 4 @classmethod def lowerCamelCase ( cls : str , snake_case_ : Union[str, os.PathLike] , **snake_case_ : List[Any] ): cls._set_token_in_kwargs(snake_case_ ) snake_case__ , snake_case__ : Optional[Any] = cls.get_config_dict(snake_case_ , **snake_case_ ) # get the vision config dict if we are loading from AlignConfig if config_dict.get("""model_type""" ) == "align": snake_case__ : Optional[Any] = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(snake_case_ , **snake_case_ ) class UpperCAmelCase_ ( _a ): """simple docstring""" lowercase = "align" lowercase = True def __init__( self : int , snake_case_ : List[Any]=None , snake_case_ : List[str]=None , snake_case_ : Tuple=640 , snake_case_ : List[Any]=1.0 , snake_case_ : List[str]=0.02 , **snake_case_ : Any , ): super().__init__(**snake_case_ ) if text_config is None: snake_case__ : Optional[int] = {} logger.info("""text_config is None. Initializing the AlignTextConfig with default values.""" ) if vision_config is None: snake_case__ : Optional[Any] = {} logger.info("""vision_config is None. 
Initializing the AlignVisionConfig with default values.""" ) snake_case__ : List[str] = AlignTextConfig(**snake_case_ ) snake_case__ : Optional[Any] = AlignVisionConfig(**snake_case_ ) snake_case__ : Dict = projection_dim snake_case__ : List[str] = temperature_init_value snake_case__ : Tuple = initializer_range @classmethod def lowerCamelCase ( cls : Union[str, Any] , snake_case_ : AlignTextConfig , snake_case_ : AlignVisionConfig , **snake_case_ : Optional[int] ): return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case_ ) def lowerCamelCase ( self : List[Any] ): snake_case__ : int = copy.deepcopy(self.__dict__ ) snake_case__ : Union[str, Any] = self.text_config.to_dict() snake_case__ : List[str] = self.vision_config.to_dict() snake_case__ : List[str] = self.__class__.model_type return output
35
'''simple docstring'''
import string
from math import log10

# Explicit export list so `from <module> import *` exposes the
# underscore-prefixed helper, which `import *` would otherwise skip.
__all__ = ["__snake_case"]

# NOTE(review): all four helpers below share the name `__snake_case`, so each
# definition shadows the previous one and only the last (tf * idf) is
# reachable at module level.  Renaming them would change the module's public
# interface, so the shadowing is kept and only the fatal defects are fixed:
# every function previously declared duplicate parameters named
# `_lowerCAmelCase` (a SyntaxError) while its body read undefined names, and
# the file imported the non-existent `math.logaa` (should be `log10`).


def __snake_case(term: str, document: str) -> int:
    """Return the term frequency: how often *term* occurs in *document*.

    Punctuation and newlines are stripped before whitespace tokenization;
    the comparison is case-insensitive.
    """
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def __snake_case(term: str, corpus: str) -> tuple[int, int]:
    """Return (documents containing *term*, total documents) for *corpus*.

    Documents are the newline-separated chunks of *corpus*; punctuation is
    stripped and matching is case-insensitive.
    """
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def __snake_case(df: int, n: int, smoothing: bool = False) -> float:
    """Return the inverse document frequency, rounded to 3 decimals.

    With *smoothing* the formula 1 + log10(n / (1 + df)) is used, which is
    defined even for df == 0; otherwise log10(n / df) is used and df == 0
    raises ZeroDivisionError.  n == 0 always raises ValueError because
    log10(0) is undefined.
    """
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def __snake_case(tf: float, idf: float) -> float:
    """Return the tf-idf score, rounded to 3 decimal places."""
    return round(tf * idf, 3)
35
1
'''simple docstring'''
import torch

from diffusers import DiffusionPipeline


# NOTE(review): the base class `_a` is not defined anywhere in this module;
# it presumably should be the imported `DiffusionPipeline` — confirm.
class UpperCAmelCase_ ( _a ):
    """Minimal one-step diffusion pipeline (test helper).

    NOTE(review): as written the module cannot run — see the per-method
    notes on mangled/undefined names below.
    """

    # NOTE(review): both parameters are declared with the same name
    # `snake_case_` (a duplicate-argument SyntaxError); they are presumably
    # the UNet and the scheduler, in that order — confirm.
    def __init__( self : str , snake_case_ : Optional[int] , snake_case_ : int ):
        super().__init__()
        # Register components so DiffusionPipeline utilities (save/load,
        # device placement) can see them.
        self.register_modules(unet=snake_case_ , scheduler=snake_case_ )

    # NOTE(review): the body reads `snake_case_`, `scheduler_output` and
    # `result`, none of which are bound in this scope — the assignment
    # targets were mangled to `snake_case__`.  The apparent intent: sample
    # noise, run the UNet once at timestep 1, take one scheduler step, and
    # return ones_like(prev_sample) (the subtraction cancels to zero).
    def __call__( self : Optional[Any] ):
        # Standard-normal noise shaped like a single model sample.
        snake_case__ : Union[str, Any] = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
        snake_case__ : List[Any] = 1
        snake_case__ : Tuple = self.unet(snake_case_ , snake_case_ ).sample
        snake_case__ : Dict = self.scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample
        snake_case__ : Optional[int] = scheduler_output - scheduler_output + torch.ones_like(snake_case_ )
        return result
35
'''simple docstring''' from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCAmelCase_ : """simple docstring""" def __init__( self : int , snake_case_ : Tuple , snake_case_ : List[str]=3 , snake_case_ : Tuple=32 , snake_case_ : List[Any]=3 , snake_case_ : List[str]=10 , snake_case_ : List[str]=[10, 20, 30, 40] , snake_case_ : Tuple=[1, 1, 2, 1] , snake_case_ : Tuple=True , snake_case_ : str=True , snake_case_ : int="relu" , snake_case_ : List[Any]=3 , snake_case_ : str=None , ): snake_case__ : List[Any] = parent snake_case__ : List[Any] = batch_size snake_case__ : int = image_size snake_case__ : List[Any] = num_channels snake_case__ : Optional[Any] = embeddings_size snake_case__ : Optional[int] = hidden_sizes snake_case__ : Tuple = depths snake_case__ : Any = is_training snake_case__ : Optional[int] = use_labels snake_case__ : Optional[int] = hidden_act snake_case__ : Optional[int] = num_labels snake_case__ : int = scope snake_case__ : Tuple = len(snake_case_ ) def lowerCamelCase ( self : Any ): snake_case__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case__ : Union[str, Any] = None if self.use_labels: snake_case__ : Optional[Any] = ids_tensor([self.batch_size] , 
self.num_labels ) snake_case__ : List[str] = self.get_config() return config, pixel_values, labels def lowerCamelCase ( self : int ): return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def lowerCamelCase ( self : Tuple , snake_case_ : Tuple , snake_case_ : List[Any] , snake_case_ : Optional[int] ): snake_case__ : Optional[Any] = TFResNetModel(config=snake_case_ ) snake_case__ : int = model(snake_case_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCamelCase ( self : List[str] , snake_case_ : List[str] , snake_case_ : str , snake_case_ : Union[str, Any] ): snake_case__ : str = self.num_labels snake_case__ : Optional[int] = TFResNetForImageClassification(snake_case_ ) snake_case__ : Tuple = model(snake_case_ , labels=snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase ( self : Tuple ): snake_case__ : List[Any] = self.prepare_config_and_inputs() snake_case__ , snake_case__ , snake_case__ : str = config_and_inputs snake_case__ : int = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class UpperCAmelCase_ ( _a , _a , unittest.TestCase ): """simple docstring""" lowercase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () lowercase = ( {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification} if is_tf_available() else {} ) lowercase = False lowercase = False lowercase = False lowercase = False lowercase = False def lowerCamelCase ( self : Optional[int] ): snake_case__ : Tuple = TFResNetModelTester(self ) snake_case__ : List[str] = ConfigTester(self , config_class=snake_case_ , 
has_text_modality=snake_case_ ) def lowerCamelCase ( self : Dict ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase ( self : str ): return @unittest.skip(reason="""ResNet does not use inputs_embeds""" ) def lowerCamelCase ( self : int ): pass @unittest.skip(reason="""ResNet does not support input and output embeddings""" ) def lowerCamelCase ( self : List[Any] ): pass def lowerCamelCase ( self : List[Any] ): snake_case__ , snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : Dict = model_class(snake_case_ ) snake_case__ : Optional[Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case__ : Union[str, Any] = [*signature.parameters.keys()] snake_case__ : Optional[int] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , snake_case_ ) def lowerCamelCase ( self : Union[str, Any] ): snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case_ ) def lowerCamelCase ( self : List[str] ): def check_hidden_states_output(snake_case_ : Any , snake_case_ : Any , snake_case_ : List[str] ): snake_case__ : List[Any] = model_class(snake_case_ ) snake_case__ : Dict = model(**self._prepare_for_class(snake_case_ , snake_case_ ) ) snake_case__ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states snake_case__ : List[Any] = self.model_tester.num_stages self.assertEqual(len(snake_case_ ) , expected_num_stages + 1 ) # ResNet's feature maps are of 
shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) snake_case__ , snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ : List[Any] = ["""basic""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: snake_case__ : Dict = layer_type snake_case__ : Optional[int] = True check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case__ : List[Any] = True check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ) def lowerCamelCase ( self : Optional[Any] ): snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case_ ) @slow def lowerCamelCase ( self : Optional[Any] ): for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case__ : str = TFResNetModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) def __snake_case( ) -> Optional[int]: snake_case__ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" @cached_property def lowerCamelCase ( self : List[Any] ): return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCamelCase ( self : Optional[int] ): snake_case__ : List[str] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) snake_case__ : List[Any] = self.default_image_processor snake_case__ : List[Any] = prepare_img() snake_case__ : List[str] = image_processor(images=snake_case_ , return_tensors="""tf""" ) # forward pass snake_case__ : Optional[Any] = 
model(**snake_case_ ) # verify the logits snake_case__ : Union[str, Any] = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape , snake_case_ ) snake_case__ : List[str] = tf.constant([-11.1069, -9.7877, -8.3777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , snake_case_ , atol=1E-4 ) )
35
1
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__a = [
    "python",
    "tqdm",
    "regex",
    "requests",
    "packaging",
    "filelock",
    "numpy",
    "tokenizers",
    "huggingface-hub",
    "safetensors",
    "accelerate",
    "pyyaml",
]

# Fixed: the loop previously iterated over the undefined name
# `pkgs_to_check_at_runtime`; the package list above is bound to `__a`.
for pkg in __a:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        elif pkg == "accelerate":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_accelerate_available

            # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
            # Transformers with PyTorch
            if not is_accelerate_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def __snake_case(pkg, hint=None) -> None:
    """Check that the pinned version requirement for *pkg* is satisfied.

    Fixed: both parameters were previously declared with the same
    (duplicate) name `_lowerCAmelCase` — a SyntaxError — and the body read
    the leaked module-level loop variable instead of its own argument.
    The wrong `-> int` annotation (nothing is returned) is also corrected.
    """
    require_version(deps[pkg], hint)
35
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


__a = logging.get_logger(__name__)

# NOTE(review): this rebinds `__a` (previously the logger), making the
# logger unreachable — the two names were presumably distinct originally.
__a = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


# NOTE(review): the base class `_a` is undefined in this module; it
# presumably should be the imported `PretrainedConfig` — confirm.
class UpperCAmelCase_ ( _a ):
    """Configuration container for a GLPN model.

    Stores encoder geometry (block depths, strides, hidden sizes, attention
    heads), regularisation rates and decoder settings as plain attributes.
    """

    lowercase = "glpn"  # model_type identifier

    # NOTE(review): every parameter is declared with the same name
    # `snake_case_` (a duplicate-argument SyntaxError) and the body reads
    # undefined names such as `num_channels` — the mangled parameter names
    # must be restored (in the order of the assignments below) before this
    # module can run.
    def __init__( self : Optional[Any] , snake_case_ : List[str]=3 , snake_case_ : Dict=4 , snake_case_ : List[Any]=[2, 2, 2, 2] , snake_case_ : int=[8, 4, 2, 1] , snake_case_ : List[str]=[32, 64, 160, 256] , snake_case_ : Tuple=[7, 3, 3, 3] , snake_case_ : List[Any]=[4, 2, 2, 2] , snake_case_ : Tuple=[1, 2, 5, 8] , snake_case_ : List[str]=[4, 4, 4, 4] , snake_case_ : Optional[int]="gelu" , snake_case_ : Dict=0.0 , snake_case_ : Union[str, Any]=0.0 , snake_case_ : List[Any]=0.02 , snake_case_ : Tuple=0.1 , snake_case_ : Any=1E-6 , snake_case_ : Dict=64 , snake_case_ : Tuple=10 , snake_case_ : List[Any]=-1 , **snake_case_ : Optional[Any] , ):
        super().__init__(**snake_case_ )
        # NOTE(review): assignment targets were mangled to `snake_case__`;
        # each line presumably should be `self.<name> = <name>`.
        snake_case__ : Optional[Any] = num_channels
        snake_case__ : Dict = num_encoder_blocks
        snake_case__ : Tuple = depths
        snake_case__ : Union[str, Any] = sr_ratios
        snake_case__ : Tuple = hidden_sizes
        snake_case__ : Optional[Any] = patch_sizes
        snake_case__ : int = strides
        snake_case__ : List[Any] = mlp_ratios
        snake_case__ : Optional[int] = num_attention_heads
        snake_case__ : Dict = hidden_act
        snake_case__ : int = hidden_dropout_prob
        snake_case__ : Optional[Any] = attention_probs_dropout_prob
        snake_case__ : str = initializer_range
        snake_case__ : List[str] = drop_path_rate
        snake_case__ : int = layer_norm_eps
        snake_case__ : Tuple = decoder_hidden_size
        snake_case__ : List[Any] = max_depth
        snake_case__ : Dict = head_in_index
35
1
'''simple docstring'''
from typing import Any

# Explicit export list so `from <module> import *` exposes the
# underscore-prefixed evaluator, which `import *` would otherwise skip.
__all__ = ["__snake_case"]


def __snake_case(postfix_notation: list[str]) -> int:
    """Evaluate an integer expression given in postfix (RPN) notation.

    Supported operators are +, -, * and /; division truncates toward zero
    (Python's // floors, so negative quotients with a non-zero remainder
    are corrected by adding 1).  An empty expression evaluates to 0.

    Fixed: the operand unpacking previously bound both popped values to
    the same duplicate name, leaving ``a`` and ``b`` undefined in the body.
    """
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            # Top of stack is the right-hand operand.
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # Truncate toward zero instead of flooring.
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
35
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roformer import RoFormerTokenizer from .tokenization_utils import JiebaPreTokenizer __a = logging.get_logger(__name__) __a = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} __a = { "vocab_file": { "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt", "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt", "junnyu/roformer_chinese_char_small": ( "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt" ), "junnyu/roformer_chinese_char_base": ( "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt" ), "junnyu/roformer_small_discriminator": ( "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt" ), "junnyu/roformer_small_generator": ( "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt" ), } } __a = { "junnyu/roformer_chinese_small": 1536, "junnyu/roformer_chinese_base": 1536, "junnyu/roformer_chinese_char_small": 512, "junnyu/roformer_chinese_char_base": 512, "junnyu/roformer_small_discriminator": 128, "junnyu/roformer_small_generator": 128, } __a = { "junnyu/roformer_chinese_small": {"do_lower_case": True}, "junnyu/roformer_chinese_base": {"do_lower_case": True}, "junnyu/roformer_chinese_char_small": {"do_lower_case": True}, "junnyu/roformer_chinese_char_base": {"do_lower_case": True}, "junnyu/roformer_small_discriminator": {"do_lower_case": True}, "junnyu/roformer_small_generator": {"do_lower_case": True}, } class UpperCAmelCase_ ( _a ): """simple docstring""" lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = 
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = PRETRAINED_INIT_CONFIGURATION lowercase = RoFormerTokenizer def __init__( self : List[Any] , snake_case_ : List[str]=None , snake_case_ : Dict=None , snake_case_ : Any=True , snake_case_ : str="[UNK]" , snake_case_ : List[str]="[SEP]" , snake_case_ : Optional[Any]="[PAD]" , snake_case_ : Union[str, Any]="[CLS]" , snake_case_ : Union[str, Any]="[MASK]" , snake_case_ : List[Any]=True , snake_case_ : Optional[Any]=None , **snake_case_ : Tuple , ): super().__init__( snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , tokenize_chinese_chars=snake_case_ , strip_accents=snake_case_ , **snake_case_ , ) snake_case__ : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( pre_tok_state.get("""lowercase""" , snake_case_ ) != do_lower_case or pre_tok_state.get("""strip_accents""" , snake_case_ ) != strip_accents ): snake_case__ : str = getattr(snake_case_ , pre_tok_state.pop("""type""" ) ) snake_case__ : Optional[int] = do_lower_case snake_case__ : Union[str, Any] = strip_accents snake_case__ : Union[str, Any] = pre_tok_class(**snake_case_ ) snake_case__ : str = do_lower_case def __getstate__( self : int ): snake_case__ : List[Any] = self.__dict__.copy() snake_case__ : str = BertPreTokenizer() return state def __setstate__( self : Dict , snake_case_ : Dict ): snake_case__ : List[Any] = d snake_case__ : Union[str, Any] = self.__dict__["""_tokenizer"""].get_vocab() snake_case__ : List[Any] = PreTokenizer.custom(JiebaPreTokenizer(snake_case_ ) ) def lowerCamelCase ( self : str , snake_case_ : Optional[Any] , snake_case_ : List[str]=None ): snake_case__ : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowerCamelCase ( self : str , snake_case_ : List[int] , snake_case_ : 
Optional[List[int]] = None ): snake_case__ : int = [self.sep_token_id] snake_case__ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCamelCase ( self : Dict , snake_case_ : str , snake_case_ : Optional[str] = None ): snake_case__ : Union[str, Any] = self._tokenizer.model.save(snake_case_ , name=snake_case_ ) return tuple(snake_case_ ) def lowerCamelCase ( self : Dict , snake_case_ : List[str] , snake_case_ : Tuple=None , snake_case_ : List[str]=None , snake_case_ : Union[str, Any]=False , **snake_case_ : Tuple , ): snake_case__ : Optional[Any] = BertPreTokenizer() return super().save_pretrained(snake_case_ , snake_case_ , snake_case_ , snake_case_ , **snake_case_ )
35
1
'''simple docstring'''
from ..utils import DummyObject, requires_backends


# NOTE(review): the metaclass `_a` is undefined in this module; it
# presumably should be the imported `DummyObject` — confirm.
class UpperCAmelCase_ ( metaclass=_a ):
    """Placeholder raising a helpful error when `keras_nlp` is unavailable.

    Instantiating it calls `requires_backends`, which reports the missing
    backend listed in `lowercase`.
    """

    lowercase = ["keras_nlp"]  # backends this dummy stands in for

    # NOTE(review): `*snake_case_` and `**snake_case_` share one name — a
    # duplicate-argument SyntaxError that must be fixed before use.
    def __init__( self : List[Any] , *snake_case_ : Dict , **snake_case_ : int ):
        requires_backends(self , ["""keras_nlp"""] )
35
'''simple docstring'''
import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


# NOTE(review): both tests below are named `__snake_case`, so the second
# definition shadows the first and pytest would collect only one of them.
def __snake_case( _lowerCAmelCase ) -> Optional[int]:
    """Acquiring an already-held lock should raise after ~`timeout` seconds.

    NOTE(review): the body reads `tmpdir`, `locka`, `_start` and `timeout`
    while every assignment target is the mangled placeholder
    `snake_case__`; the original local names must be restored before this
    test can run.  The parameter is presumably the pytest `tmpdir` fixture,
    and `pytest.raises(_lowerCAmelCase)` presumably expected `Timeout`.
    """
    snake_case__ : int = FileLock(str(tmpdir / """foo.lock""" ) )
    snake_case__ : Dict = FileLock(str(tmpdir / """foo.lock""" ) )
    snake_case__ : List[str] = 0.01
    with locka.acquire():
        with pytest.raises(_lowerCAmelCase ):
            snake_case__ : str = time.time()
            locka.acquire(_lowerCAmelCase )
    assert time.time() - _start > timeout


def __snake_case( _lowerCAmelCase ) -> Tuple:
    """Over-long lock filenames should be shortened to <= 255 characters.

    NOTE(review): same mangling as above — `tmpdir`, `filename` and
    `locka` are read but never bound under those names.
    """
    snake_case__ : Dict = """a""" * 1_000 + """.lock"""
    snake_case__ : int = FileLock(str(tmpdir / filename ) )
    assert locka._lock_file.endswith(""".lock""" )
    assert not locka._lock_file.endswith(_lowerCAmelCase )
    assert len(os.path.basename(locka._lock_file ) ) <= 255
    snake_case__ : Dict = FileLock(tmpdir / filename )
    with locka.acquire():
        with pytest.raises(_lowerCAmelCase ):
            locka.acquire(0 )
35
1
'''simple docstring''' import os import jsonlines import numpy as np from tqdm import tqdm __a = 2048 __a = 4096 __a = 42 __a = os.environ.pop("PROCESS_TRAIN", "false") __a = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4} def __snake_case( _lowerCAmelCase ) -> Union[str, Any]: def choose_first(_lowerCAmelCase , _lowerCAmelCase=False ): assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) if len(_lowerCAmelCase ) == 1: snake_case__ : Optional[Any] = answer[0] return {k: [answer[k]] for k in answer} if is_long_answer else answer for a in answer: if is_long_answer: snake_case__ : Any = {k: [a[k]] for k in a} if len(a["""start_token"""] ) > 0: break return a snake_case__ : Union[str, Any] = {"""id""": example["""id"""]} snake_case__ : int = example["""annotations"""] snake_case__ : str = annotation["""yes_no_answer"""] if 0 in yes_no_answer or 1 in yes_no_answer: snake_case__ : Optional[Any] = ["""yes"""] if 1 in yes_no_answer else ["""no"""] snake_case__ : Dict = [] snake_case__ : int = [] snake_case__ : Optional[Any] = ["""<cls>"""] else: snake_case__ : Tuple = ["""short"""] snake_case__ : Tuple = choose_first(annotation["""short_answers"""] ) if len(out["""start_token"""] ) == 0: # answer will be long if short is not available snake_case__ : Any = ["""long"""] snake_case__ : List[str] = choose_first(annotation["""long_answer"""] , is_long_answer=_lowerCAmelCase ) snake_case__ : Tuple = [] answer.update(_lowerCAmelCase ) # disregard some samples if len(answer["""start_token"""] ) > 1 or answer["start_token"] == answer["end_token"]: snake_case__ : List[str] = True else: snake_case__ : str = False snake_case__ : Any = ["""start_token""", """end_token""", """start_byte""", """end_byte""", """text"""] if not all(isinstance(answer[k] , _lowerCAmelCase ) for k in cols ): raise ValueError("""Issue in ID""" , example["""id"""] ) return answer def __snake_case( _lowerCAmelCase , _lowerCAmelCase=False ) -> List[Any]: snake_case__ : Optional[Any] = 
_get_single_answer(_lowerCAmelCase ) # bytes are of no use del answer["start_byte"] del answer["end_byte"] # handle yes_no answers explicitly if answer["category"][0] in ["yes", "no"]: # category is list with one element snake_case__ : Optional[Any] = example["""document"""]["""tokens"""] snake_case__ : List[str] = [] for i in range(len(doc["""token"""] ) ): if not doc["is_html"][i]: context.append(doc["""token"""][i] ) return { "context": " ".join(_lowerCAmelCase ), "answer": { "start_token": -100, # ignore index in cross-entropy "end_token": -100, # ignore index in cross-entropy "category": answer["category"], "span": answer["category"], # extra }, } # later, help in removing all no answers if answer["start_token"] == [-1]: return { "context": "None", "answer": { "start_token": -1, "end_token": -1, "category": "null", "span": "None", # extra }, } # handling normal samples snake_case__ : Any = ["""start_token""", """end_token"""] answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. 
[10] == 10 snake_case__ : List[str] = example["""document"""]["""tokens"""] snake_case__ : str = answer["""start_token"""] snake_case__ : List[str] = answer["""end_token"""] snake_case__ : Tuple = [] for i in range(len(doc["""token"""] ) ): if not doc["is_html"][i]: context.append(doc["""token"""][i] ) else: if answer["start_token"] > i: start_token -= 1 if answer["end_token"] > i: end_token -= 1 snake_case__ : int = """ """.join(context[start_token:end_token] ) # checking above code if assertion: snake_case__ : List[str] = doc["""is_html"""][answer["""start_token"""] : answer["""end_token"""]] snake_case__ : List[str] = doc["""token"""][answer["""start_token"""] : answer["""end_token"""]] snake_case__ : Optional[int] = """ """.join([old[i] for i in range(len(_lowerCAmelCase ) ) if not is_html[i]] ) if new != old: print("""ID:""" , example["""id"""] ) print("""New:""" , _lowerCAmelCase , end="""\n""" ) print("""Old:""" , _lowerCAmelCase , end="""\n\n""" ) return { "context": " ".join(_lowerCAmelCase ), "answer": { "start_token": start_token, "end_token": end_token - 1, # this makes it inclusive "category": answer["category"], # either long or short "span": new, # extra }, } def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=2_048 , _lowerCAmelCase=4_096 , _lowerCAmelCase=True ) -> Optional[Any]: # overlap will be of doc_stride - q_len snake_case__ : Optional[int] = get_context_and_ans(_lowerCAmelCase , assertion=_lowerCAmelCase ) snake_case__ : int = out["""answer"""] # later, removing these samples if answer["start_token"] == -1: return { "example_id": example["id"], "input_ids": [[-1]], "labels": { "start_token": [-1], "end_token": [-1], "category": ["null"], }, } snake_case__ : Union[str, Any] = tokenizer(example["""question"""]["""text"""] , out["""context"""] ).input_ids snake_case__ : int = input_ids.index(tokenizer.sep_token_id ) + 1 # return yes/no if answer["category"][0] in ["yes", "no"]: # category is list with one element snake_case__ 
: Tuple = [] snake_case__ : Dict = [] snake_case__ : Optional[Any] = input_ids[:q_len] snake_case__ : List[str] = range(_lowerCAmelCase , len(_lowerCAmelCase ) , max_length - doc_stride ) for i in doc_start_indices: snake_case__ : Any = i + max_length - q_len snake_case__ : str = input_ids[i:end_index] inputs.append(q_indices + slice ) category.append(answer["""category"""][0] ) if slice[-1] == tokenizer.sep_token_id: break return { "example_id": example["id"], "input_ids": inputs, "labels": { "start_token": [-100] * len(_lowerCAmelCase ), "end_token": [-100] * len(_lowerCAmelCase ), "category": category, }, } snake_case__ : int = out["""context"""].split() snake_case__ : Tuple = splitted_context[answer["""end_token"""]] snake_case__ : int = len( tokenizer( """ """.join(splitted_context[: answer["""start_token"""]] ) , add_special_tokens=_lowerCAmelCase , ).input_ids ) snake_case__ : int = len( tokenizer(""" """.join(splitted_context[: answer["""end_token"""]] ) , add_special_tokens=_lowerCAmelCase ).input_ids ) answer["start_token"] += q_len answer["end_token"] += q_len # fixing end token snake_case__ : Tuple = len(tokenizer(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids ) if num_sub_tokens > 1: answer["end_token"] += num_sub_tokens - 1 snake_case__ : int = input_ids[answer["""start_token"""] : answer["""end_token"""] + 1] # right & left are inclusive snake_case__ : Dict = answer["""start_token"""] snake_case__ : Optional[int] = answer["""end_token"""] if assertion: snake_case__ : Union[str, Any] = tokenizer.decode(_lowerCAmelCase ) if answer["span"] != new: print("""ISSUE IN TOKENIZATION""" ) print("""OLD:""" , answer["""span"""] ) print("""NEW:""" , _lowerCAmelCase , end="""\n\n""" ) if len(_lowerCAmelCase ) <= max_length: return { "example_id": example["id"], "input_ids": [input_ids], "labels": { "start_token": [answer["start_token"]], "end_token": [answer["end_token"]], "category": answer["category"], }, } snake_case__ : Tuple = 
input_ids[:q_len] snake_case__ : Optional[int] = range(_lowerCAmelCase , len(_lowerCAmelCase ) , max_length - doc_stride ) snake_case__ : Dict = [] snake_case__ : List[str] = [] snake_case__ : int = [] snake_case__ : Any = [] # null, yes, no, long, short for i in doc_start_indices: snake_case__ : int = i + max_length - q_len snake_case__ : List[Any] = input_ids[i:end_index] inputs.append(q_indices + slice ) assert len(inputs[-1] ) <= max_length, "Issue in truncating length" if start_token >= i and end_token <= end_index - 1: snake_case__ : List[Any] = start_token - i + q_len snake_case__ : Dict = end_token - i + q_len answers_category.append(answer["""category"""][0] ) # ["short"] -> "short" else: snake_case__ : Union[str, Any] = -100 snake_case__ : Any = -100 answers_category.append("""null""" ) snake_case__ : Tuple = inputs[-1][start_token : end_token + 1] answers_start_token.append(_lowerCAmelCase ) answers_end_token.append(_lowerCAmelCase ) if assertion: if new != old and new != [tokenizer.cls_token_id]: print("""ISSUE in strided for ID:""" , example["""id"""] ) print("""New:""" , tokenizer.decode(_lowerCAmelCase ) ) print("""Old:""" , tokenizer.decode(_lowerCAmelCase ) , end="""\n\n""" ) if slice[-1] == tokenizer.sep_token_id: break return { "example_id": example["id"], "input_ids": inputs, "labels": { "start_token": answers_start_token, "end_token": answers_end_token, "category": answers_category, }, } def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=2_048 , _lowerCAmelCase=4_096 , _lowerCAmelCase=False ) -> Optional[Any]: snake_case__ : int = get_strided_contexts_and_ans( _lowerCAmelCase , _lowerCAmelCase , doc_stride=_lowerCAmelCase , max_length=_lowerCAmelCase , assertion=_lowerCAmelCase , ) return example def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]: with jsonlines.open(_lowerCAmelCase , """a""" ) as writer: for example in tqdm(_lowerCAmelCase , total=len(_lowerCAmelCase ) , desc="""Saving samples ... 
""" ): snake_case__ : Optional[Any] = example["""labels"""] for ids, start, end, cat in zip( example["""input_ids"""] , labels["""start_token"""] , labels["""end_token"""] , labels["""category"""] , ): if start == -1 and end == -1: continue # leave waste samples with no answer if cat == "null" and np.random.rand() < 0.6: continue # removing 50 % samples writer.write( { """input_ids""": ids, """start_token""": start, """end_token""": end, """category""": CATEGORY_MAPPING[cat], } ) if __name__ == "__main__": from datasets import load_dataset from transformers import BigBirdTokenizer __a = load_dataset("natural_questions") __a = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base") __a = data["train" if PROCESS_TRAIN == "true" else "validation"] __a = { "tokenizer": tokenizer, "doc_stride": DOC_STRIDE, "max_length": MAX_LENGTH, "assertion": False, } __a = data.map(prepare_inputs, fn_kwargs=fn_kwargs) __a = data.remove_columns(["annotations", "document", "id", "question"]) print(data) np.random.seed(SEED) __a = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl" save_to_disk(data, file_name=cache_file_name)
35
"""Closed-form sum of an arithmetic series."""


def sum_of_series(first_term: float, common_diff: float, num_of_terms: int) -> float:
    """Return the sum of an arithmetic progression.

    Uses the closed form ``n/2 * (2a + (n - 1) * d)`` — O(1), no loop.

    :param first_term: first term ``a`` of the progression
    :param common_diff: common difference ``d`` between consecutive terms
    :param num_of_terms: number of terms ``n`` to sum

    >>> sum_of_series(1, 1, 10)
    55.0
    >>> sum_of_series(5, 0, 4)
    20.0
    """
    # formula for sum of series
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total


# Backward-compatible alias: the source's (name-mangled) public name.  The
# original module bound two different functions to ``__snake_case``; the demo
# below referenced ``sum_of_series``, which did not exist — fixed here.
__snake_case = sum_of_series


def main() -> None:
    """Demo: print the sum 1 + 2 + ... + 10."""
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
35
1
"""Prefix-sum index over a list of integers."""


class UpperCAmelCase_:
    """Prefix-sum structure supporting O(1) range sums and an O(n)
    "does any contiguous subarray sum to target" query.

    NOTE(review): the source was name-mangled — attribute assignments had been
    turned into dead local variables (so ``self.prefix_sum`` was never set)
    and both query methods shared the name ``lowerCamelCase``, shadowing the
    range-sum query.  Reconstructed so the structure works; the surviving
    public name ``lowerCamelCase`` keeps its last-bound meaning (subarray-sum
    test) and the range query is exposed as ``get_sum``.
    """

    def __init__(self, snake_case_: list[int]):
        # prefix_sum[i] == sum(snake_case_[: i + 1])
        self.len_array = len(snake_case_)
        self.prefix_sum = [0] * self.len_array
        if self.len_array > 0:
            self.prefix_sum[0] = snake_case_[0]
            for i in range(1, self.len_array):
                self.prefix_sum[i] = self.prefix_sum[i - 1] + snake_case_[i]

    def get_sum(self, start: int, end: int) -> int:
        """Return the sum of the slice [start, end] (both inclusive) in O(1)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def lowerCamelCase(self, snake_case_: int) -> bool:
        """Return True iff some contiguous subarray sums to ``snake_case_``.

        A subarray with that sum exists iff two prefix sums differ by exactly
        the target (0 is seeded so prefixes themselves count).
        """
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - snake_case_ in sums:
                return True
            sums.add(sum_item)
        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
35
"""Parameter-name sets used by diffusers pipeline test mixins.

NOTE(review): the source was name-mangled — every constant below was collapsed
to the single name ``__a``, so each assignment overwrites the previous one and
only the last binding survives at import time.  The originals presumably had
distinct names (TEXT_TO_IMAGE_PARAMS, IMAGE_VARIATION_PARAMS, ...); confirm
against the upstream ``pipeline_params`` module before relying on this file.
"""

# Text-to-image call arguments (presumed grouping, inferred from contents).
__a = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)
__a = frozenset(["prompt", "negative_prompt"])
__a = frozenset([])
__a = frozenset(["image"])
__a = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)
__a = frozenset(["image"])
# Text-guided image-to-image arguments.
__a = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)
__a = frozenset(["prompt", "image", "negative_prompt"])
__a = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)
__a = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
__a = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)
__a = frozenset(["image", "mask_image"])
# Example-guided inpainting arguments.
__a = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)
__a = frozenset(["example_image", "image", "mask_image"])
# Class-conditioned generation.
__a = frozenset(["class_labels"])
__a = frozenset(["class_labels"])
# Unconditional generation.
__a = frozenset(["batch_size"])
__a = frozenset([])
__a = frozenset(["batch_size"])
__a = frozenset([])
# Text-to-audio arguments.
__a = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)
__a = frozenset(["prompt", "negative_prompt"])
# Token-conditioned generation.
__a = frozenset(["input_tokens"])
__a = frozenset(["input_tokens"])
35
1
"""Tests for the LDM text-to-image diffusion pipeline.

NOTE(review): this module is name-mangled.  ``_a`` (a base class), ``unet``,
``scheduler``, ``vae``, ``text_encoder``, ``tokenizer``, ``components``,
``generator``, ``inputs``, ``pipe``, ``image``, ``image_slice``,
``expected_slice``, ``latents``, ``max_diff`` and ``expected_image`` are all
referenced but never bound (their assignments were rewritten to the dead local
``snake_case__``), and every method shares the name ``lowerCamelCase`` so all
but the last per class are shadowed.  The code is kept byte-identical here;
it must be de-mangled against the upstream diffusers test file before it can
run.  Code left unchanged; only comments/docstrings added.
"""
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    load_numpy,
    nightly,
    require_torch_gpu,
    slow,
    torch_device,
)

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class UpperCAmelCase_ ( _a , unittest.TestCase ):
    """Fast CPU test: a tiny LDM pipeline built from dummy components.

    NOTE(review): ``_a`` is presumably ``PipelineTesterMixin`` and the repeated
    ``lowercase`` attributes were distinct mixin knobs (pipeline_class, params,
    required_optional_params, batch_params, ...) — confirm upstream.
    """

    lowercase = LDMTextToImagePipeline
    lowercase = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    lowercase = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    lowercase = TEXT_TO_IMAGE_BATCH_PARAMS
    lowercase = False

    def lowerCamelCase ( self : List[Any] ):
        # Builds minimal unet / scheduler / vae / text-encoder components;
        # seeds torch before each stochastic constructor for determinism.
        torch.manual_seed(0 )
        snake_case__ : List[str] = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        snake_case__ : List[str] = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , )
        torch.manual_seed(0 )
        snake_case__ : Optional[Any] = AutoencoderKL(
            block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , latent_channels=4 , )
        torch.manual_seed(0 )
        snake_case__ : Optional[int] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        snake_case__ : Optional[Any] = CLIPTextModel(snake_case_ )
        snake_case__ : List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        snake_case__ : Any = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vqvae""": vae,
            """bert""": text_encoder,
            """tokenizer""": tokenizer,
        }
        return components

    def lowerCamelCase ( self : Tuple , snake_case_ : Any , snake_case_ : Optional[int]=0 ):
        # mps does not support device-local generators, so seed globally there.
        if str(snake_case_ ).startswith("""mps""" ):
            snake_case__ : str = torch.manual_seed(snake_case_ )
        else:
            snake_case__ : Any = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
        snake_case__ : List[str] = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs

    def lowerCamelCase ( self : int ):
        snake_case__ : Dict = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        snake_case__ : List[Any] = self.get_dummy_components()
        snake_case__ : Any = LDMTextToImagePipeline(**snake_case_ )
        pipe.to(snake_case_ )
        pipe.set_progress_bar_config(disable=snake_case_ )
        snake_case__ : Tuple = self.get_dummy_inputs(snake_case_ )
        snake_case__ : Union[str, Any] = pipe(**snake_case_ ).images
        snake_case__ : Dict = image[0, -3:, -3:, -1]
        assert image.shape == (1, 16, 16, 3)
        # Golden values for the bottom-right 3x3 corner of the red channel.
        snake_case__ : Dict = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3


@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
    """Slow GPU test against the pretrained CompVis/ldm-text2im-large-256 weights."""

    def lowerCamelCase ( self : Union[str, Any] ):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowerCamelCase ( self : Tuple , snake_case_ : List[Any] , snake_case_ : List[Any]=torch.floataa , snake_case_ : Tuple=0 ):
        # Fixed latents (seeded numpy noise) make the diffusion run reproducible.
        snake_case__ : Any = torch.manual_seed(snake_case_ )
        snake_case__ : str = np.random.RandomState(snake_case_ ).standard_normal((1, 4, 32, 32) )
        snake_case__ : int = torch.from_numpy(snake_case_ ).to(device=snake_case_ , dtype=snake_case_ )
        snake_case__ : List[Any] = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """latents""": latents,
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs

    def lowerCamelCase ( self : Tuple ):
        snake_case__ : Dict = LDMTextToImagePipeline.from_pretrained("""CompVis/ldm-text2im-large-256""" ).to(snake_case_ )
        pipe.set_progress_bar_config(disable=snake_case_ )
        snake_case__ : int = self.get_inputs(snake_case_ )
        snake_case__ : Optional[Any] = pipe(**snake_case_ ).images
        snake_case__ : Tuple = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 256, 256, 3)
        snake_case__ : List[Any] = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878] )
        snake_case__ : Any = np.abs(expected_slice - image_slice ).max()
        assert max_diff < 1E-3


@nightly
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
    """Nightly GPU test: full 50-step run compared to a stored reference image."""

    def lowerCamelCase ( self : Dict ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowerCamelCase ( self : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : Tuple=torch.floataa , snake_case_ : List[str]=0 ):
        snake_case__ : Tuple = torch.manual_seed(snake_case_ )
        snake_case__ : Union[str, Any] = np.random.RandomState(snake_case_ ).standard_normal((1, 4, 32, 32) )
        snake_case__ : List[str] = torch.from_numpy(snake_case_ ).to(device=snake_case_ , dtype=snake_case_ )
        snake_case__ : Optional[int] = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """latents""": latents,
            """generator""": generator,
            """num_inference_steps""": 50,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs

    def lowerCamelCase ( self : List[Any] ):
        snake_case__ : Any = LDMTextToImagePipeline.from_pretrained("""CompVis/ldm-text2im-large-256""" ).to(snake_case_ )
        pipe.set_progress_bar_config(disable=snake_case_ )
        snake_case__ : Tuple = self.get_inputs(snake_case_ )
        snake_case__ : Tuple = pipe(**snake_case_ ).images[0]
        # Reference output hosted on the HF Hub.
        snake_case__ : int = load_numpy(
            """https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy""" )
        snake_case__ : List[str] = np.abs(expected_image - image ).max()
        assert max_diff < 1E-3
35
"""Tests for the GPTSan-Japanese tokenizer.

NOTE(review): this module is name-mangled.  ``_a`` (a base class, presumably
``TokenizerTesterMixin``), ``vocab_tokens``, ``tokenizer``, ``tokens``,
``input_text``, ``output_text``, ``prefix_text``, ``len_prefix``, ``len_text``,
``x_token``, ``x_token_a`` and others are referenced but never bound (their
assignments were rewritten to the dead local ``snake_case__``), and every
method shares the name ``lowerCamelCase`` so all but the last are shadowed.
Code left byte-identical; only comments/docstrings added.
"""
import json
import os
import unittest

from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
    VOCAB_FILES_NAMES,
    GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class UpperCAmelCase_ ( _a , unittest.TestCase ):
    """Tokenizer test-suite; the repeated ``lowercase`` attributes were
    presumably distinct mixin knobs (tokenizer_class, test_rust_tokenizer,
    kwargs) — confirm upstream."""

    lowercase = GPTSanJapaneseTokenizer
    lowercase = False
    lowercase = {"do_clean_text": False, "add_prefix_space": False}

    def lowerCamelCase ( self : str ):
        # Writes a tiny vocab + emoji map into the temp dir used by from_pretrained.
        super().setUp()
        # fmt: off
        snake_case__ : Optional[Any] = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
        # fmt: on
        snake_case__ : int = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}}  # 😀
        snake_case__ : List[Any] = {"""unk_token""": """<unk>"""}
        snake_case__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        snake_case__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        with open(self.emoji_file , """w""" ) as emoji_writer:
            emoji_writer.write(json.dumps(snake_case_ ) )

    def lowerCamelCase ( self : Any , **snake_case_ : Union[str, Any] ):
        kwargs.update(self.special_tokens_map )
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )

    def lowerCamelCase ( self : Any , snake_case_ : str ):
        # Input/expected-output pair: "㔺界" normalises to "世界" on decode.
        snake_case__ : Union[str, Any] = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
        snake_case__ : List[str] = """こんにちは、世界。 \nこんばんは、世界。😀"""
        return input_text, output_text

    def lowerCamelCase ( self : Any , snake_case_ : Dict ):
        snake_case__ , snake_case__ : int = self.get_input_output_texts(snake_case_ )
        snake_case__ : int = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
        snake_case__ : List[str] = tokenizer.decode(snake_case_ , clean_up_tokenization_spaces=snake_case_ )
        return text, ids

    def lowerCamelCase ( self : Optional[Any] ):
        pass  # TODO add if relevant

    def lowerCamelCase ( self : Union[str, Any] ):
        pass  # TODO add if relevant

    def lowerCamelCase ( self : List[str] ):
        pass  # TODO add if relevant

    def lowerCamelCase ( self : Dict ):
        snake_case__ : Optional[Any] = self.get_tokenizer()
        # Testing tokenization
        snake_case__ : int = """こんにちは、世界。 こんばんは、㔺界。"""
        snake_case__ : Optional[int] = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
        snake_case__ : Dict = tokenizer.tokenize(snake_case_ )
        self.assertListEqual(snake_case_ , snake_case_ )
        # Testing conversion to ids without special tokens
        snake_case__ : Union[str, Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        snake_case__ : List[Any] = tokenizer.convert_tokens_to_ids(snake_case_ )
        self.assertListEqual(snake_case_ , snake_case_ )
        # Testing conversion to ids with special tokens
        snake_case__ : Union[str, Any] = tokens + [tokenizer.unk_token]
        snake_case__ : Dict = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        snake_case__ : Any = tokenizer.convert_tokens_to_ids(snake_case_ )
        self.assertListEqual(snake_case_ , snake_case_ )

    def lowerCamelCase ( self : Optional[Any] ):
        snake_case__ : Union[str, Any] = self.get_tokenizer()
        # Testing tokenization: <|bagoftoken|> expands to repeated tokens on decode.
        snake_case__ : Union[str, Any] = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
        snake_case__ : Optional[int] = """こんにちは、、、、世界。こんばんは、、、、世界。"""
        snake_case__ : Any = tokenizer.encode(snake_case_ )
        snake_case__ : int = tokenizer.decode(snake_case_ )
        self.assertEqual(snake_case_ , snake_case_ )

    @slow
    def lowerCamelCase ( self : Union[str, Any] ):
        snake_case__ : Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        # Testing tokenization: prefix_text + text must round-trip identically
        # regardless of how the prefix is supplied.
        snake_case__ : Tuple = """こんにちは、世界。"""
        snake_case__ : Optional[Any] = """こんばんは、㔺界。😀"""
        snake_case__ : List[str] = """こんにちは、世界。こんばんは、世界。😀"""
        snake_case__ : Dict = tokenizer.encode(prefix_text + input_text )
        snake_case__ : Dict = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
        snake_case__ : int = tokenizer.encode(snake_case_ , prefix_text=snake_case_ )
        snake_case__ : Optional[Any] = tokenizer.decode(snake_case_ )
        snake_case__ : Union[str, Any] = tokenizer.decode(snake_case_ )
        snake_case__ : str = tokenizer.decode(snake_case_ )
        self.assertEqual(snake_case_ , snake_case_ )
        self.assertEqual(snake_case_ , snake_case_ )
        self.assertEqual(snake_case_ , snake_case_ )

    @slow
    def lowerCamelCase ( self : Union[str, Any] ):
        snake_case__ : Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        # Testing tokenization: token_type_ids mark the prefix segment.
        snake_case__ : Dict = """こんにちは、世界。"""
        snake_case__ : Optional[int] = """こんばんは、㔺界。😀"""
        snake_case__ : Any = len(tokenizer.encode(snake_case_ ) ) - 2
        snake_case__ : Optional[int] = len(tokenizer.encode(snake_case_ ) ) - 2
        snake_case__ : List[str] = [1] + [0] * (len_prefix + len_text + 1)
        snake_case__ : Optional[int] = [1] * (len_prefix + len_text + 1) + [0]
        snake_case__ : int = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        snake_case__ : Any = tokenizer(prefix_text + input_text ).token_type_ids
        snake_case__ : str = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
        snake_case__ : Optional[Any] = tokenizer(snake_case_ , prefix_text=snake_case_ ).token_type_ids
        self.assertListEqual(snake_case_ , snake_case_ )
        self.assertListEqual(snake_case_ , snake_case_ )
        self.assertListEqual(snake_case_ , snake_case_ )

    @slow
    def lowerCamelCase ( self : Optional[int] ):
        snake_case__ : Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        # Different prefix splits decode to the same text but place the SEG
        # token at different positions.
        snake_case__ : Union[str, Any] = tokenizer.encode("""あンいワ""" )
        snake_case__ : int = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
        snake_case__ : Dict = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
        self.assertEqual(tokenizer.decode(snake_case_ ) , tokenizer.decode(snake_case_ ) )
        self.assertEqual(tokenizer.decode(snake_case_ ) , tokenizer.decode(snake_case_ ) )
        self.assertNotEqual(snake_case_ , snake_case_ )
        self.assertNotEqual(snake_case_ , snake_case_ )
        self.assertEqual(x_token_a[1] , x_token_a[-1] )  # SEG token
        self.assertEqual(x_token_a[1] , x_token_a[3] )  # SEG token

    @slow
    def lowerCamelCase ( self : Any ):
        snake_case__ : Optional[int] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        # Batched prefix/text pairs: __call__ and batch_encode_plus must agree.
        snake_case__ : int = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
        snake_case__ : Optional[Any] = tokenizer(snake_case_ , padding=snake_case_ )
        snake_case__ : Tuple = tokenizer.batch_encode_plus(snake_case_ , padding=snake_case_ )
        # fmt: off
        snake_case__ : Optional[Any] = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]]
        snake_case__ : Optional[Any] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        snake_case__ : Optional[Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids , snake_case_ )
        self.assertListEqual(x_token.token_type_ids , snake_case_ )
        self.assertListEqual(x_token.attention_mask , snake_case_ )
        self.assertListEqual(x_token_a.input_ids , snake_case_ )
        self.assertListEqual(x_token_a.token_type_ids , snake_case_ )
        self.assertListEqual(x_token_a.attention_mask , snake_case_ )

    def lowerCamelCase ( self : Any ):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def lowerCamelCase ( self : List[str] ):
        # tokenizer has no padding token
        pass
35
1
"""Numeric approximation of the arc length of a curve."""
from __future__ import annotations

import math
from collections.abc import Callable


def __snake_case(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the arc length of ``fnc`` between ``x_start`` and ``x_end``.

    The curve is approximated as ``steps`` straight segments whose lengths are
    summed, so accuracy grows with ``steps``.

    NOTE(review): the source's signature was name-mangled into three parameters
    with the same name (a SyntaxError) while the body referenced the original
    names — reconstructed here.

    >>> f"{__snake_case(lambda x: x, 0, 1, 10):.4f}"
    '1.4142'
    """
    xa = x_start
    fxa = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        xa_next = (x_end - x_start) / steps + xa
        fxa_next = fnc(xa_next)
        length += math.hypot(xa_next - xa, fxa_next - fxa)
        # Increment step
        xa = xa_next
        fxa = fxa_next
    return length


# Readable alias; the demo below (and the original upstream script) call it
# by this name.
line_length = __snake_case


if __name__ == "__main__":

    def f(x: float) -> float:
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 10_0000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
35
# NOTE(review): this module is name-mangled.  The base class ``_a`` is
# undefined here — presumably it was ``BertTokenizerFast`` (imported below),
# and ``lowercase`` was presumably ``slow_tokenizer_class`` (the attribute a
# fast tokenizer uses to locate its slow counterpart).  Confirm against the
# upstream custom-tokenizer example before relying on this file.
from transformers import BertTokenizerFast

from .custom_tokenization import CustomTokenizer


class UpperCAmelCase_ ( _a ):
    """Fast-tokenizer stub paired with the project's ``CustomTokenizer``."""

    # Presumed link to the slow tokenizer class — TODO confirm attribute name.
    lowercase = CustomTokenizer
    pass
35
1
"""Skip list: a probabilistic sorted map with expected O(log n) operations.

NOTE(review): the source was name-mangled — both classes were renamed to
``UpperCAmelCase_``, every function to ``__snake_case``, and attribute
assignments were rewritten into dead locals, so the module could not run
(``Node``, ``SkipList``, ``main`` and the ``test_*`` helpers were referenced
but never defined).  Reconstructed with working definitions; the mangled
public names are kept as aliases at the bottom.
"""
from __future__ import annotations

from random import random
from typing import Generic, TypeVar

KT = TypeVar("KT")
VT = TypeVar("VT")


class Node(Generic[KT, VT]):
    """One key/value pair plus its per-level forward links."""

    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        # forward[i] is the next node at level i (level 0 links every node).
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        """Number of levels this node participates in."""
        return len(self.forward)


class SkipList(Generic[KT, VT]):
    """Sorted key/value map; ``p`` is the promotion probability and
    ``max_level`` caps tower height."""

    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()  # sentinel, holds no data
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        """Render an ASCII diagram of the list, one row per node."""
        items = list(self)
        if len(items) == 0:
            return f"SkipList(level={self.level})"
        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))
        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward
        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        # Level 0 links every node in sorted key order.
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Draw a tower height: geometric with success probability ``p``."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key):
        """Return ``(node with key or None, per-level predecessor list)``."""
        update_vector = []
        node = self.head
        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially
            # have to be updated.
            update_vector.append(node)
        update_vector.reverse()  # we collected predecessors top level first
        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search
        #                              key if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        return None, update_vector

    def delete(self, key: KT) -> None:
        """Remove ``key`` if present, relinking every level it appeared on."""
        node, update_vector = self._locate_node(key)
        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT) -> None:
        """Insert ``key``/``value``; an existing key has its value replaced."""
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
            return
        level = self.random_level()
        if level > self.level:
            # After level increase we have to add additional nodes to head.
            for _ in range(self.level - 1, level):
                update_vector.append(self.head)
            self.level = level
        new_node = Node(key, value)
        for i, update_node in enumerate(update_vector[:level]):
            # Change references to pass through new node.
            if update_node.level > i:
                new_node.forward.append(update_node.forward[i])
            if update_node.level < i + 1:
                update_node.forward.append(new_node)
            else:
                update_node.forward[i] = new_node

    def find(self, key: KT):
        """Return the value stored under ``key``, or None if absent."""
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None


# Backward-compatible alias for the original (mangled) class name.
UpperCAmelCase_ = SkipList


def test_insert() -> None:
    """All inserted pairs are reachable via level-0 links."""
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value() -> None:
    """Re-inserting a key replaces its value instead of duplicating it."""
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)
    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)
    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none() -> None:
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search() -> None:
    skip_list = SkipList()
    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20
    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)
    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing() -> None:
    skip_list = SkipList()
    skip_list.delete("Some key")
    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method() -> None:
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)
    skip_list.delete("V")
    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key() -> None:
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)
    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15
    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15
    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15
    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None


def test_delete_doesnt_leave_dead_nodes() -> None:
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)
    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    # head ("root") + the three surviving keys = 4 distinct reachable keys.
    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values() -> None:
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests() -> None:
    # Repeat test 100 times due to the probabilistic nature of skip list
    # random values == random bugs
    for _ in range(100):
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()


def main() -> None:
    """Small printable demo of insert/override/delete."""
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")
    skip_list.delete(4)
    print(skip_list)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
35
"""Custom transformers Pipeline for (pair) sequence classification."""
import numpy as np

from transformers import Pipeline


def __snake_case(_lowerCAmelCase):
    """Return the numerically stable softmax of ``_lowerCAmelCase`` along its
    last axis.

    FIX: the mangled source passed the input array itself as ``keepdims``;
    it must be ``True`` so the max/sum broadcast back against the input.
    """
    maxes = np.max(_lowerCAmelCase, axis=-1, keepdims=True)
    shifted_exp = np.exp(_lowerCAmelCase - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


# The pipeline below calls ``softmax``; bind the readable name explicitly
# (the mangled module referenced it without defining it).
softmax = __snake_case


class UpperCAmelCase_(Pipeline):
    """Classification pipeline returning ``{"label", "score", "logits"}``.

    NOTE(review): the source's four methods were all mangled to one name
    (``lowerCamelCase``) so only the last survived; they are restored to the
    hook names that ``transformers.Pipeline`` dispatches to.  The base class
    was mangled to ``_a``; ``Pipeline`` (imported above) is the only plausible
    referent.
    """

    def _sanitize_parameters(self, **kwargs):
        # Only ``second_text`` is recognised; forward it to preprocess.
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, inputs, second_text=None):
        # Tokenize the text (optionally as a pair) into framework tensors.
        return self.tokenizer(inputs, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)
        best_class = np.argmax(probabilities)
        # NOTE(review): ``idalabel`` looks name-mangled; the real transformers
        # config attribute is ``id2label`` — confirm before shipping.
        label = self.model.config.idalabel[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
35
1
'''simple docstring'''
# NOTE(review): these are the CodeParrot argument groups (training /
# evaluation / human-eval / preprocessing / tokenizer-training /
# pretokenization / initialization) after a mechanical renaming pass.
# Every class is named `UpperCAmelCase_` (each definition shadows the
# previous one) and every field is named `lowercase` WITHOUT a type
# annotation, so `@dataclass` records no fields and each assignment simply
# overwrites the last — confirm against the upstream codeparrot
# `arguments.py` before using.  `_a` appears as a default value but is not
# defined in this chunk; presumably it stood for `None` — verify.
from dataclasses import dataclass, field
from typing import Optional


@dataclass
class UpperCAmelCase_:
    # Training-loop configuration: model/dataset paths, batch sizes,
    # LR schedule, checkpointing.
    """simple docstring"""

    lowercase = field(default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."})
    lowercase = field(default="./", metadata={"help": "Save dir where model repo is cloned and models updates are saved to."})
    lowercase = field(default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."})
    lowercase = field(default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."})
    lowercase = field(default=2, metadata={"help": "Batch size for training."})
    lowercase = field(default=2, metadata={"help": "Batch size for evaluation."})
    lowercase = field(default=0.1, metadata={"help": "Value of weight decay."})
    lowercase = field(default=1_00_00, metadata={"help": "Size of buffer used to shuffle streaming dataset."})
    lowercase = field(default=2e-4, metadata={"help": "Learning rate fo training."})  # NOTE(review): "fo" typo kept verbatim
    lowercase = field(default="cosine", metadata={"help": "Learning rate."})
    lowercase = field(default=7_50, metadata={"help": "Number of warmup steps in the learning rate schedule."})
    lowercase = field(default=16, metadata={"help": "Number of gradient accumulation steps."})
    lowercase = field(default=_a, metadata={"help": "Use gradient checkpointing to reduce memory footprint."})
    lowercase = field(default=5_00_00, metadata={"help": "Maximum number of training steps."})
    lowercase = field(default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."})
    lowercase = field(default=10_24, metadata={"help": "Sequence lengths used for training."})
    lowercase = field(default=1, metadata={"help": "Training seed."})
    lowercase = field(
        default=10_24,
        metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."},
    )
    lowercase = field(default=_a, metadata={"help": "States path if the training should continue from a checkpoint folder."})
    lowercase = field(default=_a, metadata={"help": "If True the data is pretokenized."})


@dataclass
class UpperCAmelCase_:
    # Evaluation-only configuration.
    """simple docstring"""

    lowercase = field(default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."})
    lowercase = field(default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."})
    lowercase = field(default=2, metadata={"help": "Batch size used for evaluation."})
    lowercase = field(default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."})
    lowercase = field(default=10_24, metadata={"help": "Length of sequences to be evaluated."})
    lowercase = field(default=1, metadata={"help": "Random seed used for evaluation."})


@dataclass
class UpperCAmelCase_:
    # HumanEval generation configuration (sampling, parallelism, code-eval).
    """simple docstring"""

    lowercase = field(default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."})
    lowercase = field(default=_a, metadata={"help": "Number of workers used for code evaluation."})
    lowercase = field(
        default=_a,
        metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."},
    )
    lowercase = field(default=_a, metadata={"help": "Sample from the language model's output distribution."})
    lowercase = field(default=0.2, metadata={"help": "Sampling temperature used for generation."})
    lowercase = field(default=2_56, metadata={"help": "Maximum number of newly generated tokens."})
    lowercase = field(default=0, metadata={"help": "Top-k parameter used for generation."})
    lowercase = field(default=0.95, metadata={"help": "Top-p parameter used for nucleus sampling."})
    lowercase = field(default=10, metadata={"help": "Number of generations to run in parallel."})
    lowercase = field(default=2_00, metadata={"help": "Number of completions to generate for each sample."})
    lowercase = field(default=1, metadata={"help": "Random seed used for evaluation."})
    # NOTE(review): the help below looks copy-pasted from the seed field —
    # this default suggests an output-file path; verify upstream.
    lowercase = field(default="eval_results.json", metadata={"help": "Random seed used for evaluation."})
    lowercase = field(default="0", metadata={"help": "Allow `code_eval` to execute Python code on machine"})
    lowercase = field(
        default=-1,
        metadata={
            "help": (
                "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
                " number corresponds to which GPU device id to run on."
            )
        },
    )


@dataclass
class UpperCAmelCase_:
    # Dataset preprocessing / filtering configuration.
    """simple docstring"""

    lowercase = field(
        default=_a,
        metadata={
            "help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
        },
    )
    lowercase = field(default="transformersbook/codeparrot", metadata={"help": "Folder or name of dataset to process."})
    lowercase = field(default="codeparrot-clean", metadata={"help": "Folder to save processed processed dataset."})
    lowercase = field(default=10_00_00, metadata={"help": "Number of files to save per JSON output file."})
    lowercase = field(default="content", metadata={"help": "Column containing text data to process."})
    lowercase = field(default=10_00, metadata={"help": "Maximum line length in file, otherwise file is filtered."})
    lowercase = field(default=1_00, metadata={"help": "Maximum mean line length in file, otherwise file is filtered."})
    lowercase = field(default=0.25, metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."})
    lowercase = field(default=1.5, metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."})
    lowercase = field(default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."})
    lowercase = field(
        default="codeparrot/codeparrot",
        metadata={"help": "Name or path to the tokenizer."},
    )
    lowercase = field(default=_a, metadata={"help": "If True, near-duplicate samples are removed."})
    lowercase = field(default=0.85, metadata={"help": "Jaccard threshold for near-duplicate samples."})


@dataclass
class UpperCAmelCase_:
    # Tokenizer-training configuration.
    """simple docstring"""

    lowercase = field(default="gpt2", metadata={"help": "Base tokenizer to build new tokenizer from."})
    lowercase = field(default="transformersbook/codeparrot-train", metadata={"help": "Dataset to train tokenizer on."})
    lowercase = field(default="content", metadata={"help": "Column containing text data to process."})
    lowercase = field(default=20_00_00, metadata={"help": "Number of examples to train tokenizer on."})
    # NOTE(review): default 32768 suggests a vocab size; the help looks
    # copy-pasted from the previous field — verify upstream.
    lowercase = field(default=3_27_68, metadata={"help": "Number of examples to train the tokenizer on."})
    lowercase = field(default="codeparrot", metadata={"help": "Name of new tokenizer."})
    lowercase = field(default=_a, metadata={"help": "Push saved tokenizer to the hub."})


@dataclass
class UpperCAmelCase_:
    # Pretokenization configuration.
    """simple docstring"""

    lowercase = field(default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."})
    lowercase = field(default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."})
    lowercase = field(default="tokenized-codeparrot-train", metadata={"help": "Repo name of the pretokenized data."})
    lowercase = field(default=_a, metadata={"help": "Number of workers used for code evaluation."})


@dataclass
class UpperCAmelCase_:
    # Model-initialization configuration.
    """simple docstring"""

    lowercase = field(default="gpt2-large", metadata={"help": "Configuration to use for model initialization."})
    lowercase = field(default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to model."})
    lowercase = field(default="codeparrot", metadata={"help": "Name of the created model."})
    lowercase = field(default=_a, metadata={"help": "Push saved tokenizer to the hub."})
35
'''simple docstring'''
# NOTE(review): the obfuscation pass renamed the three defs to
# `__snake_case` (each shadowing the previous) while the call sites still
# used `floyd`, `reverse_floyd` and `pretty_print`, and the bodies still
# referenced the pre-renaming names `n`, `i` and `K`.  Canonical names and
# loop variables restored.


# Function to print upper half of diamond (pyramid)
def floyd(n):
    """Print `n` rows of right-aligned stars forming the top of a diamond."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """Print `n` rows of shrinking stars forming the bottom of a diamond."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Print a full diamond of side `n`; print a notice for n <= 0."""
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- | |- |--| |\ /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
35
1
'''simple docstring'''
# NOTE(review): TensorFlow benchmark backend (speed/memory measurement for
# inference and training), corrupted by a mechanical renaming pass: the
# module logger is assigned to `__a` but used below as `logger`; two
# top-level defs and most methods repeat the same parameter name (a
# SyntaxError); all methods share the name `lowerCamelCase` (later defs
# shadow earlier ones); bodies reference pre-renaming locals (`func`,
# `strategy`, `config`, `model`, `_inference`, `_train`, ...);
# `-> List[Any]` uses an unimported name; `tf.intaa` is presumably
# `tf.int32` and `self.args.fpaa` presumably `fp16` — confirm against the
# upstream `transformers/benchmark/benchmark_tf.py` before running.
import random
import timeit
from functools import wraps
from typing import Callable, Optional

from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
    Benchmark,
    Memory,
    MemorySummary,
    measure_peak_memory_cpu,
    start_memory_tracing,
    stop_memory_tracing,
)


if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.framework.errors_impl import ResourceExhaustedError

    from .benchmark_args_tf import TensorFlowBenchmarkArguments

if is_pyanvml_available():
    import pyanvml.pyanvml as nvml

__a = logging.get_logger(__name__)  # NOTE(review): referenced below as `logger`


# Decorator factory: wraps a model call to run eagerly or as an
# (optionally XLA-compiled) tf.function depending on the benchmark args.
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
    def run_func(_lowerCAmelCase ):
        @wraps(_lowerCAmelCase )
        def run_in_eager_mode(*_lowerCAmelCase , **_lowerCAmelCase ):
            return func(*_lowerCAmelCase , **_lowerCAmelCase )

        @wraps(_lowerCAmelCase )
        @tf.function(experimental_compile=_lowerCAmelCase )
        def run_in_graph_mode(*_lowerCAmelCase , **_lowerCAmelCase ):
            return func(*_lowerCAmelCase , **_lowerCAmelCase )

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."""
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


# Builds a (batch_size, sequence_length) tensor of random token ids.
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> ["tf.Tensor"]:
    snake_case__ : Optional[int] = random.Random()
    snake_case__ : List[str] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(_lowerCAmelCase , shape=(batch_size, sequence_length) , dtype=tf.intaa )


class UpperCAmelCase_ ( _a ):
    # TensorFlow implementation of the Benchmark interface (`_a` is
    # presumably the `Benchmark` base imported above — TODO confirm).
    """simple docstring"""

    # NOTE(review): `42` was the obfuscator's placeholder for class-level
    # annotations in the upstream file.
    lowercase = 42
    lowercase = 42
    lowercase = "TensorFlow"

    @property
    def lowerCamelCase ( self : Dict ):
        # Framework version string reported with benchmark results.
        return tf.__version__

    def lowerCamelCase ( self : Optional[Any] , snake_case_ : str , snake_case_ : int , snake_case_ : int ):
        # initialize GPU on separate process
        # Inference speed: build the forward callable, then time it.
        snake_case__ : Any = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        snake_case__ : List[Any] = self._prepare_inference_func(snake_case_ , snake_case_ , snake_case_ )
        return self._measure_speed(_inference )

    def lowerCamelCase ( self : Tuple , snake_case_ : str , snake_case_ : int , snake_case_ : int ):
        # Training speed: build the train-step callable, then time it.
        snake_case__ : List[Any] = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        snake_case__ : List[Any] = self._prepare_train_func(snake_case_ , snake_case_ , snake_case_ )
        return self._measure_speed(_train )

    def lowerCamelCase ( self : Optional[int] , snake_case_ : str , snake_case_ : int , snake_case_ : int ):
        # initialize GPU on separate process
        # Inference memory: measure the peak footprint of a forward pass.
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , snake_case_ )
        snake_case__ : Optional[Any] = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        snake_case__ : Optional[int] = self._prepare_inference_func(snake_case_ , snake_case_ , snake_case_ )
        return self._measure_memory(_inference )

    def lowerCamelCase ( self : List[str] , snake_case_ : str , snake_case_ : int , snake_case_ : int ):
        # Training memory: measure the peak footprint of a train step.
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , snake_case_ )
        snake_case__ : Dict = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        snake_case__ : str = self._prepare_train_func(snake_case_ , snake_case_ , snake_case_ )
        return self._measure_memory(_train )

    def lowerCamelCase ( self : Dict , snake_case_ : str , snake_case_ : int , snake_case_ : int ):
        # Builds the forward-pass closure for the given model name and
        # (batch_size, sequence_length) input shape.
        snake_case__ : Dict = self.config_dict[model_name]

        if self.args.fpaa:
            raise NotImplementedError("""Mixed precision is currently not supported.""" )

        snake_case__ : Dict = (
            hasattr(snake_case_ , """architectures""" )
            and isinstance(config.architectures , snake_case_ )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                snake_case__ : Optional[Any] = """TF""" + config.architectures[0]  # prepend 'TF' for tensorflow model
                snake_case__ : int = __import__("""transformers""" , fromlist=[model_class] )
                snake_case__ : str = getattr(snake_case_ , snake_case_ )
                snake_case__ : Union[str, Any] = model_cls(snake_case_ )
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    """ set `--only_pretrain_model` or `args.only_pretrain_model=True`."""
                )
        else:
            snake_case__ : Any = TF_MODEL_MAPPING[config.__class__](snake_case_ )

        # encoder-decoder has vocab size saved differently
        snake_case__ : Optional[int] = config.vocab_size if hasattr(snake_case_ , """vocab_size""" ) else config.encoder.vocab_size
        snake_case__ : Optional[int] = random_input_ids(snake_case_ , snake_case_ , snake_case_ )

        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_decoder_forward():
            return model(snake_case_ , decoder_input_ids=snake_case_ , training=snake_case_ )

        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_forward():
            return model(snake_case_ , training=snake_case_ )

        snake_case__ : Any = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward

        return _inference

    def lowerCamelCase ( self : List[str] , snake_case_ : str , snake_case_ : int , snake_case_ : int ):
        # Builds the train-step closure (forward pass + gradients).
        snake_case__ : List[Any] = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )

        if self.args.fpaa:
            raise NotImplementedError("""Mixed precision is currently not supported.""" )

        snake_case__ : Tuple = (
            hasattr(snake_case_ , """architectures""" )
            and isinstance(config.architectures , snake_case_ )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                snake_case__ : List[Any] = """TF""" + config.architectures[0]  # prepend 'TF' for tensorflow model
                snake_case__ : Tuple = __import__("""transformers""" , fromlist=[model_class] )
                snake_case__ : List[str] = getattr(snake_case_ , snake_case_ )
                snake_case__ : List[Any] = model_cls(snake_case_ )
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    """ set `--only_pretrain_model` or `args.only_pretrain_model=True`."""
                )
        else:
            snake_case__ : Any = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](snake_case_ )

        # encoder-decoder has vocab size saved differently
        snake_case__ : int = config.vocab_size if hasattr(snake_case_ , """vocab_size""" ) else config.encoder.vocab_size
        snake_case__ : Dict = random_input_ids(snake_case_ , snake_case_ , snake_case_ )

        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_decoder_train():
            snake_case__ : Dict = model(snake_case_ , decoder_input_ids=snake_case_ , labels=snake_case_ , training=snake_case_ )[0]
            snake_case__ : str = tf.gradients(snake_case_ , model.trainable_variables )
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_train():
            snake_case__ : List[str] = model(snake_case_ , labels=snake_case_ , training=snake_case_ )[0]
            snake_case__ : List[str] = tf.gradients(snake_case_ , model.trainable_variables )
            return gradients

        snake_case__ : List[Any] = encoder_decoder_train if config.is_encoder_decoder else encoder_train

        return _train

    def lowerCamelCase ( self : List[str] , snake_case_ : Any ):
        # Times the callable: optional warm-up on TPU/XLA, then
        # `repeat` x 10 runs, returning the per-call minimum.
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
                    timeit.repeat(snake_case_ , repeat=1 , number=5 )

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                snake_case__ : List[Any] = timeit.repeat(
                    snake_case_ ,
                    repeat=self.args.repeat ,
                    number=10 ,
                )

                return min(snake_case_ ) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}" )

    def lowerCamelCase ( self : Union[str, Any] , snake_case_ : Callable[[], None] ):
        # Measures peak memory: nvml totals on GPU, peak RSS or line-by-line
        # tracing on CPU; returns (memory, optional trace summary).
        logger.info(
            """Note that TensorFlow allocates more memory than """
            """it might need to speed up computation. """
            """The memory reported here corresponds to the memory """
            """reported by `nvidia-smi`, which can vary depending """
            """on total available memory on the GPU that is used."""
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
                            """ consumption line by line."""
                        )
                    snake_case__ : int = start_memory_tracing("""transformers""" )

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
                        """ with `args.memory=False`"""
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            """py3nvml not installed, we won't log GPU memory usage. """
                            """Install py3nvml (pip install py3nvml) to log information about GPU."""
                        )
                        snake_case__ : Union[str, Any] = """N/A"""
                    else:
                        logger.info(
                            """Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
                            """ running on the same GPU."""
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        snake_case__ : Tuple = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
                        snake_case__ : List[Any] = nvml.nvmlDeviceGetMemoryInfo(snake_case_ )
                        snake_case__ : List[str] = meminfo.used
                        snake_case__ : int = Memory(snake_case_ )
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            """When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
                            """ TensorFlow."""
                        )
                        snake_case__ : List[Any] = None
                    else:
                        snake_case__ : Any = measure_peak_memory_cpu(snake_case_ )
                        snake_case__ : str = Memory(snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    snake_case__ : Optional[int] = stop_memory_tracing(snake_case_ )

                    if memory is None:
                        snake_case__ : Union[str, Any] = summary.total
                else:
                    snake_case__ : List[str] = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}" )
                return "N/A", None
35
'''simple docstring'''


def solution(n: int = 1_000) -> int:
    """Project Euler problem 1: sum of all multiples of 3 or 5 below `n`.

    NOTE(review): the def had been mechanically renamed `__snake_case`
    while the `__main__` guard still called `solution()`; name restored.
    """
    # Multiples below 3 do not exist, so the range may start at 3.
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(F"{solution() = }")
35
1
'''simple docstring'''
# NOTE(review): unit tests for the Wav2Vec2 feature extractor, corrupted by
# a mechanical renaming pass: the helper def and tester `__init__` repeat
# the same parameter name (a SyntaxError), all test methods are named
# `lowerCamelCase` (later defs shadow earlier ones under unittest), and
# bodies reference pre-renaming locals (`rng`, `shape`, `values`,
# `feat_extract`, ...).  `np.floataa` is presumably `np.float32` and
# `WavaVeca*` presumably `Wav2Vec2*` — restore from the upstream
# `test_feature_extraction_wav2vec2.py` before running.
import itertools
import random
import unittest

import numpy as np

from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


__a = random.Random()  # NOTE(review): referenced below as `global_rng`


# Returns a shape[0] x shape[1] nested list of random floats scaled by `scale`.
def __snake_case( _lowerCAmelCase , _lowerCAmelCase=1.0 , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Dict:
    if rng is None:
        snake_case__ : Optional[int] = global_rng

    snake_case__ : int = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )

    return values


class UpperCAmelCase_ ( unittest.TestCase ):
    # Fixture holder mirroring the upstream Wav2Vec2FeatureExtractionTester:
    # stores sizes/flags and builds inputs for the tests below.
    """simple docstring"""

    def __init__( self : str , snake_case_ : Dict , snake_case_ : Optional[int]=7 , snake_case_ : Optional[int]=400 , snake_case_ : Optional[Any]=2_000 , snake_case_ : List[Any]=1 , snake_case_ : List[str]=0.0 , snake_case_ : Union[str, Any]=16_000 , snake_case_ : Union[str, Any]=True , snake_case_ : Union[str, Any]=True , ):
        snake_case__ : Optional[Any] = parent
        snake_case__ : Union[str, Any] = batch_size
        snake_case__ : List[str] = min_seq_length
        snake_case__ : Dict = max_seq_length
        snake_case__ : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        snake_case__ : Dict = feature_size
        snake_case__ : int = padding_value
        snake_case__ : Any = sampling_rate
        snake_case__ : str = return_attention_mask
        snake_case__ : Optional[Any] = do_normalize

    def lowerCamelCase ( self : Dict ):
        # Kwargs for constructing the feature extractor under test.
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def lowerCamelCase ( self : int , snake_case_ : str=False , snake_case_ : Any=False ):
        def _flatten(snake_case_ : Optional[int] ):
            return list(itertools.chain(*snake_case_ ) )

        if equal_length:
            snake_case__ : Optional[Any] = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            snake_case__ : Optional[Any] = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]

        if numpify:
            snake_case__ : List[Any] = [np.asarray(snake_case_ ) for x in speech_inputs]

        return speech_inputs


class UpperCAmelCase_ ( _a , unittest.TestCase ):
    # Test case proper; `_a` is presumably
    # SequenceFeatureExtractionTestMixin — TODO confirm.
    """simple docstring"""

    lowercase = WavaVecaFeatureExtractor

    def lowerCamelCase ( self : Tuple ):
        snake_case__ : Any = WavaVecaFeatureExtractionTester(self )

    def lowerCamelCase ( self : int , snake_case_ : Union[str, Any] ):
        # Asserts per-feature zero mean / unit variance within 1e-3.
        self.assertTrue(np.all(np.mean(snake_case_ , axis=0 ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(snake_case_ , axis=0 ) - 1 ) < 1E-3 ) )

    def lowerCamelCase ( self : List[Any] ):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        snake_case__ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        snake_case__ : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
        snake_case__ : int = [np.asarray(snake_case_ ) for speech_input in speech_inputs]

        # Test not batched input
        snake_case__ : str = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
        snake_case__ : str = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
        self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1E-3 ) )

        # Test batched
        snake_case__ : Optional[int] = feat_extract(snake_case_ , return_tensors="""np""" ).input_values
        snake_case__ : Any = feat_extract(snake_case_ , return_tensors="""np""" ).input_values
        for enc_seq_a, enc_seq_a in zip(snake_case_ , snake_case_ ):
            self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1E-3 ) )

        # Test 2-D numpy arrays are batched.
        snake_case__ : Optional[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        snake_case__ : Union[str, Any] = np.asarray(snake_case_ )
        snake_case__ : Union[str, Any] = feat_extract(snake_case_ , return_tensors="""np""" ).input_values
        snake_case__ : Tuple = feat_extract(snake_case_ , return_tensors="""np""" ).input_values
        for enc_seq_a, enc_seq_a in zip(snake_case_ , snake_case_ ):
            self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1E-3 ) )

    def lowerCamelCase ( self : Union[str, Any] ):
        # Zero-mean/unit-variance normalization across padding strategies.
        snake_case__ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        snake_case__ : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]

        snake_case__ : str = ["""longest""", """max_length""", """do_not_pad"""]
        snake_case__ : str = [None, 1_600, None]
        for max_length, padding in zip(snake_case_ , snake_case_ ):
            snake_case__ : Any = feat_extract(snake_case_ , padding=snake_case_ , max_length=snake_case_ , return_tensors="""np""" )
            snake_case__ : List[str] = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800] )
            self.assertTrue(input_values[0][800:].sum() < 1E-6 )
            self._check_zero_mean_unit_variance(input_values[1][:1_000] )
            self.assertTrue(input_values[0][1_000:].sum() < 1E-6 )
            self._check_zero_mean_unit_variance(input_values[2][:1_200] )

    def lowerCamelCase ( self : Any ):
        # Same normalization check, driven by an explicit lengths range.
        snake_case__ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        snake_case__ : str = range(800 , 1_400 , 200 )
        snake_case__ : Optional[int] = [floats_list((1, x) )[0] for x in lengths]

        snake_case__ : Optional[Any] = ["""longest""", """max_length""", """do_not_pad"""]
        snake_case__ : Optional[Any] = [None, 1_600, None]

        for max_length, padding in zip(snake_case_ , snake_case_ ):
            snake_case__ : int = feat_extract(snake_case_ , max_length=snake_case_ , padding=snake_case_ )
            snake_case__ : List[str] = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800] )
            self._check_zero_mean_unit_variance(input_values[1][:1_000] )
            self._check_zero_mean_unit_variance(input_values[2][:1_200] )

    def lowerCamelCase ( self : Tuple ):
        # Truncation to max_length with "max_length" padding.
        snake_case__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        snake_case__ : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
        snake_case__ : Any = feat_extract(
            snake_case_ , truncation=snake_case_ , max_length=1_000 , padding="""max_length""" , return_tensors="""np""" )
        snake_case__ : Optional[int] = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1] )
        self._check_zero_mean_unit_variance(input_values[2] )

    def lowerCamelCase ( self : Any ):
        # "longest" padding: output is clamped to max_length when shorter,
        # and to the longest input when max_length exceeds it.
        snake_case__ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        snake_case__ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
        snake_case__ : List[str] = feat_extract(
            snake_case_ , truncation=snake_case_ , max_length=1_000 , padding="""longest""" , return_tensors="""np""" )
        snake_case__ : Tuple = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1, :1_000] )
        self._check_zero_mean_unit_variance(input_values[2] )

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1_000) )

        snake_case__ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
        snake_case__ : Dict = feat_extract(
            snake_case_ , truncation=snake_case_ , max_length=2_000 , padding="""longest""" , return_tensors="""np""" )
        snake_case__ : Union[str, Any] = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1, :1_000] )
        self._check_zero_mean_unit_variance(input_values[2] )

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1_200) )

    @require_torch
    def lowerCamelCase ( self : List[Any] ):
        # Padding keeps float32 dtype for both numpy and torch tensors.
        import torch

        snake_case__ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        snake_case__ : List[str] = np.random.rand(100 ).astype(np.floataa )
        snake_case__ : Dict = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            snake_case__ : str = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            snake_case__ : Union[str, Any] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )

    @slow
    @require_torch
    def lowerCamelCase ( self : Any ):
        # this test makes sure that models that are using
        # group norm don't have their feature extractor return the
        # attention_mask
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            snake_case__ : List[str] = WavaVecaConfig.from_pretrained(snake_case_ )
            snake_case__ : List[str] = WavaVecaFeatureExtractor.from_pretrained(snake_case_ )

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == """layer""" )
35
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# NOTE(review): the obfuscation pass rewrote `_import_structure` (and its
# keyed updates) as bare `__a = ...` assignments, so the dict passed to
# `_LazyModule` never received the tokenizer/model entries and
# `_import_structure` itself was undefined at the bottom of the file.
# The canonical lazy-init structure is restored below.
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]


if TYPE_CHECKING:
    # Direct imports for static type checkers only; at runtime the module
    # is replaced by a lazy proxy below.
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )

else:
    import sys

    # Install the lazy module so submodules are imported on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
35
1
'''simple docstring'''
# NOTE(review): unit tests for the BlenderbotSmall tokenizer.  A mechanical
# renaming pass collapsed the class name to `UpperCAmelCase_`, both class
# attributes to `lowercase`, and every method to `lowerCamelCase` (so under
# unittest only the last same-named method survives); bodies still reference
# pre-renaming locals (`vocab_tokens`, `tokenizer`, `tok`, ...).  Restore
# names from the upstream test file before running.
import json
import os
import unittest

from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
    VOCAB_FILES_NAMES,
    BlenderbotSmallTokenizer,
)

from ...test_tokenization_common import TokenizerTesterMixin


class UpperCAmelCase_ ( _a , unittest.TestCase ):
    # `_a` is presumably TokenizerTesterMixin — TODO confirm.
    """simple docstring"""

    lowercase = BlenderbotSmallTokenizer
    lowercase = False

    def lowerCamelCase ( self : Union[str, Any] ):
        # Writes a tiny vocab/merges pair into the mixin's temp dir.
        super().setUp()

        snake_case__ : Optional[Any] = ["""__start__""", """adapt""", """act""", """ap@@""", """te""", """__end__""", """__unk__"""]
        snake_case__ : List[Any] = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )

        snake_case__ : int = ["""#version: 0.2""", """a p""", """t e</w>""", """ap t</w>""", """a d""", """ad apt</w>""", """a c""", """ac t</w>""", """"""]
        snake_case__ : Dict = {"""unk_token""": """__unk__""", """bos_token""": """__start__""", """eos_token""": """__end__"""}

        snake_case__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        snake_case__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(snake_case_ ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(snake_case_ ) )

    def lowerCamelCase ( self : List[Any] , **snake_case_ : Optional[int] ):
        # Tokenizer factory used by the shared mixin tests.
        kwargs.update(self.special_tokens_map )
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )

    def lowerCamelCase ( self : Tuple , snake_case_ : Dict ):
        # Input/output pair for the mixin's round-trip checks.
        snake_case__ : List[str] = """adapt act apte"""
        snake_case__ : Optional[int] = """adapt act apte"""
        return input_text, output_text

    def lowerCamelCase ( self : Any ):
        # Full tokenizer round-trip on the toy vocab.
        snake_case__ : Dict = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        snake_case__ : str = """adapt act apte"""
        snake_case__ : Optional[int] = ["""adapt""", """act""", """ap@@""", """te"""]
        snake_case__ : List[str] = tokenizer.tokenize(snake_case_ )
        self.assertListEqual(snake_case_ , snake_case_ )

        snake_case__ : Optional[int] = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        snake_case__ : Any = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , snake_case_ )

    def lowerCamelCase ( self : Optional[Any] ):
        # Decoding behaviour of the released 90M checkpoint (needs network).
        snake_case__ : Optional[int] = BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" )
        assert tok("""sam""" ).input_ids == [1_384]
        snake_case__ : Optional[int] = """I am a small frog."""
        snake_case__ : List[str] = tok([src_text] , padding=snake_case_ , truncation=snake_case_ )["""input_ids"""]
        snake_case__ : Tuple = tok.batch_decode(snake_case_ , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ )[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def lowerCamelCase ( self : Any ):
        # The trailing period of a sentence encodes like a standalone ".".
        snake_case__ : Tuple = BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" )
        snake_case__ : Any = """I am a small frog ."""
        snake_case__ : List[str] = """."""
        snake_case__ : Any = tok(snake_case_ )["""input_ids"""]
        snake_case__ : str = tok(snake_case_ )["""input_ids"""]

        assert encoded[-1] == encoded_dot[0]
35
'''simple docstring'''
from PIL import Image


# NOTE(review): the obfuscated original declared two parameters with the
# same name (a SyntaxError), referenced `img`/`level` that no longer
# existed, and the `__main__` guard assigned the result to `__a` while
# saving `brigt_img`.  Canonical names restored.
def change_brightness(img, level):
    """Return a copy of `img` with brightness shifted additively by `level`.

    `level` must lie in [-255.0, 255.0]; each channel value c is mapped to
    128 + level + (c - 128), i.e. a flat additive shift applied per pixel.
    Raises ValueError for an out-of-range `level`.
    """

    def brightness(c: int) -> float:
        # Per-channel transfer function applied by Image.point.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
35
1
"""Sort the entries of every ``_import_structure`` block in package __init__ files.

Run with ``--check_only`` to report files that would change instead of rewriting them.
"""
import argparse
import os
import re

PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Return the leading whitespace of `line` ("" for an empty line)."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks whose first line sits at `indent_level`.

    Everything before `start_prompt` (if given) becomes the first block, and
    everything from `end_prompt` on (if given) becomes the last block.
    """
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the file).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            # A line at the target indent ends the current block -- unless the
            # previous line was more indented, in which case it is a closing line
            # (e.g. `],`) that belongs to the block being finished.
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))
    return blocks


def ignore_underscore(key):
    """Wrap a key function so that underscores and case are ignored when sorting."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort a list of object names: constants first, then classes, then functions."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """Sort the object names inside one `_import_structure` entry."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Internal imports on several lines (one name per line); the first and
        # last one or two lines are brackets to keep in place.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Internal imports on one separate line between brackets.
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line.
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    """Sort the `_import_structure` of one init file; return True if it would change."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block_sorted = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block_sorted)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every __init__.py under PATH_TO_TRANSFORMERS."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
35
"""Sort the entries of every ``_import_structure`` block in package __init__ files.

Run with ``--check_only`` to report files that would change instead of rewriting them.
"""
import argparse
import os
import re

PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Return the leading whitespace of `line` ("" for an empty line)."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks whose first line sits at `indent_level`.

    Everything before `start_prompt` (if given) becomes the first block, and
    everything from `end_prompt` on (if given) becomes the last block.
    """
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the file).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            # A line at the target indent ends the current block -- unless the
            # previous line was more indented, in which case it is a closing line
            # (e.g. `],`) that belongs to the block being finished.
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))
    return blocks


def ignore_underscore(key):
    """Wrap a key function so that underscores and case are ignored when sorting."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort a list of object names: constants first, then classes, then functions."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """Sort the object names inside one `_import_structure` entry."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Internal imports on several lines (one name per line); the first and
        # last one or two lines are brackets to keep in place.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Internal imports on one separate line between brackets.
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line.
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    """Sort the `_import_structure` of one init file; return True if it would change."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block_sorted = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block_sorted)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every __init__.py under PATH_TO_TRANSFORMERS."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
35
1
'''simple docstring''' def __snake_case( _lowerCAmelCase ) -> int: if divisor % 5 == 0 or divisor % 2 == 0: return 0 snake_case__ : List[Any] = 1 snake_case__ : List[str] = 1 while repunit: snake_case__ : int = (10 * repunit + 1) % divisor repunit_index += 1 return repunit_index def __snake_case( _lowerCAmelCase = 1_000_000 ) -> int: snake_case__ : str = limit - 1 if divisor % 2 == 0: divisor += 1 while least_divisible_repunit(_lowerCAmelCase ) <= limit: divisor += 2 return divisor if __name__ == "__main__": print(F"{solution() = }")
35
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __a = { "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TimesformerModel", "TimesformerForVideoClassification", "TimesformerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timesformer import ( TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimesformerForVideoClassification, TimesformerModel, TimesformerPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
35
1
'''simple docstring''' def __snake_case( _lowerCAmelCase ) -> int: if n == 1 or not isinstance(_lowerCAmelCase , _lowerCAmelCase ): return 0 elif n == 2: return 1 else: snake_case__ : Optional[int] = [0, 1] for i in range(2 , n + 1 ): sequence.append(sequence[i - 1] + sequence[i - 2] ) return sequence[n] def __snake_case( _lowerCAmelCase ) -> int: snake_case__ : Union[str, Any] = 0 snake_case__ : List[str] = 2 while digits < n: index += 1 snake_case__ : Any = len(str(fibonacci(_lowerCAmelCase ) ) ) return index def __snake_case( _lowerCAmelCase = 1_000 ) -> int: return fibonacci_digits_index(_lowerCAmelCase ) if __name__ == "__main__": print(solution(int(str(input()).strip())))
35
"""Convert a fairseq wav2vec2 encoder + speech2text2 decoder checkpoint into a
HF SpeechEncoderDecoderModel checkpoint (weights, tokenizer vocab and config)."""
import argparse
import json
import os

import fairseq
import torch
from torch import nn

from transformers import (
    SpeechaTextaConfig,
    SpeechaTextaForCausalLM,
    SpeechaTextaTokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    WavaVecaConfig,
    WavaVecaFeatureExtractor,
    WavaVecaModel,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq parameter name (substring) -> HF parameter path ("*" is the layer index).
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the attribute path `key` of `hf_pointer`, checking shapes."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    """Copy all fairseq encoder weights into `hf_model`; return the enc->dec projection module."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Layer index sits just before the matched key in the name.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
            if not is_used:
                unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
    return proj_weight


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one feature-extractor conv/layer-norm weight into the HF feature extractor."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def make_linear_from_emb(emb):
    """Build a bias-free Linear layer that shares weights with the embedding `emb`."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    """Build a token -> id dict from a fairseq dict file, prepending the special tokens."""
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict


@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=16_000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)

    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False

    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-large-lv60",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/s2t-small-mustc-en-fr-st",
        type=str,
        help="Path to hf decoder s2t checkpoint config",
    )
    parser.add_argument("--vocab_size", default=1_0224, type=int, help="Vocab size of decoder")
    parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")

    args = parser.parse_args()
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
35
1
"""Vendored copy of ``py-filelock`` 3.0.12: a platform-independent inter-process
file lock. The backend is picked at import time: ``msvcrt`` on Windows,
``fcntl`` on Unix, and a soft lock-file fallback otherwise.

NOTE(review): throughout this module, results are assigned to throwaway names
(``snake_case__`` / ``__a``) while later statements read the originally-named
variables (``lock_file``, ``temp``, ``fd``, ``timeout`` ...), which are undefined
at runtime; several methods also declare duplicate ``snake_case_`` parameters,
which is a SyntaxError. This looks like damage from an automated rename —
verify against the upstream filelock 3.0.12 sources before relying on it.
"""
import logging
import os
import threading
import time

# Optional imports: each platform backend degrades gracefully when its module
# is unavailable on this interpreter.
try:
    import warnings
except ImportError:
    __a = None

try:
    import msvcrt
except ImportError:
    __a = None

try:
    import fcntl
except ImportError:
    __a = None

# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    # Python 2 had no TimeoutError; fall back to OSError.
    __a = OSError

# Data
# ------------------------------------------------
__a = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]
__a = "3.0.12"
__a = None


def __snake_case( ) -> List[str]:
    """Return the module-level logger, creating it lazily on first use."""
    global _logger
    snake_case__ : Optional[Any] = _logger or logging.getLogger(__name__ )
    return _logger


class UpperCAmelCase_ ( _a ):
    """Raised when the lock could not be acquired within the timeout."""

    def __init__( self : Any , snake_case_ : List[Any] ):
        # Path of the lock file that could not be acquired.
        # NOTE(review): ``lock_file`` is undefined here (lost assignment target).
        snake_case__ : List[str] = lock_file
        return None

    def __str__( self : Any ):
        # NOTE(review): ``temp`` is undefined here (lost assignment target).
        snake_case__ : Optional[int] = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class UpperCAmelCase_ :
    """Context-manager proxy returned by ``BaseFileLock.acquire`` so the lock
    can be used as ``with lock.acquire(): ...`` and released on exit."""

    def __init__( self : List[Any] , snake_case_ : int ):
        # NOTE(review): ``lock`` is undefined here (lost assignment target).
        snake_case__ : str = lock
        return None

    def __enter__( self : Dict ):
        return self.lock

    def __exit__( self : Any , snake_case_ : Dict , snake_case_ : Dict , snake_case_ : Tuple ):
        self.lock.release()
        return None


class UpperCAmelCase_ :
    """Base class implementing the reentrant acquire/release protocol; the
    platform-specific ``_acquire``/``_release`` primitives are supplied by
    subclasses."""

    def __init__( self : Optional[int] , snake_case_ : List[str] , snake_case_ : Optional[Any]=-1 , snake_case_ : List[Any]=None ):
        # Maximum filename length on the target filesystem (255 is a safe default).
        snake_case__ : Any = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        snake_case__ : Any = self.hash_filename_if_too_long(snake_case_ , snake_case_ )
        # The path to the lock file.
        snake_case__ : Optional[int] = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        snake_case__ : List[str] = None
        # The default timeout value.
        snake_case__ : str = timeout
        # We use this lock primarily for the lock counter.
        snake_case__ : Any = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        snake_case__ : Tuple = 0
        return None

    @property
    def lowerCamelCase ( self : Any ):
        # Path of the lock file.
        return self._lock_file

    @property
    def lowerCamelCase ( self : Optional[int] ):
        # Default timeout (seconds); negative means wait forever.
        return self._timeout

    @timeout.setter
    def lowerCamelCase ( self : List[Any] , snake_case_ : List[str] ):
        # Coerce to float so int/str timeouts are accepted.
        snake_case__ : int = float(snake_case_ )
        return None

    def lowerCamelCase ( self : Optional[int] ):
        # Platform-specific, non-blocking acquisition primitive.
        raise NotImplementedError()

    def lowerCamelCase ( self : Union[str, Any] ):
        # Platform-specific release primitive.
        raise NotImplementedError()

    @property
    def lowerCamelCase ( self : int ):
        # Holding the lock is encoded as a non-None file descriptor.
        return self._lock_file_fd is not None

    def lowerCamelCase ( self : Optional[int] , snake_case_ : List[str]=None , snake_case_ : str=0.05 ):
        # NOTE(review): duplicate ``snake_case_`` parameters — SyntaxError as written.
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            snake_case__ : int = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        snake_case__ : List[Any] = id(self )
        snake_case__ : Any = self._lock_file
        snake_case__ : Optional[Any] = time.time()
        try:
            # Poll the non-blocking _acquire() until the lock is held or the
            # timeout expires.
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}" )
                        self._acquire()
                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}" )
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}" )
                    raise Timeout(self._lock_file )
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..." )
                    time.sleep(snake_case_ )
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                snake_case__ : List[Any] = max(0 , self._lock_counter - 1 )
            raise
        return _Acquire_ReturnProxy(lock=self )

    def lowerCamelCase ( self : str , snake_case_ : Tuple=False ):
        # Decrement the nesting counter; only release the OS-level lock when it
        # reaches zero (or ``force`` is given).
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    snake_case__ : Optional[int] = id(self )
                    snake_case__ : Optional[int] = self._lock_file
                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}" )
                    self._release()
                    snake_case__ : Dict = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}" )
        return None

    def __enter__( self : Dict ):
        self.acquire()
        return self

    def __exit__( self : Union[str, Any] , snake_case_ : List[str] , snake_case_ : Optional[int] , snake_case_ : Dict ):
        self.release()
        return None

    def __del__( self : List[str] ):
        # Best-effort release when the object is garbage collected.
        self.release(force=snake_case_ )
        return None

    def lowerCamelCase ( self : str , snake_case_ : str , snake_case_ : int ):
        # Keep the lock path under the filesystem's filename-length limit by
        # replacing an over-long basename with a truncated prefix + hash.
        snake_case__ : Optional[int] = os.path.basename(snake_case_ )
        if len(snake_case_ ) > max_length and max_length > 0:
            snake_case__ : Dict = os.path.dirname(snake_case_ )
            snake_case__ : Optional[int] = str(hash(snake_case_ ) )
            snake_case__ : Optional[Any] = filename[: max_length - len(snake_case_ ) - 8] + """...""" + hashed_filename + """.lock"""
            return os.path.join(snake_case_ , snake_case_ )
        else:
            return path


class UpperCAmelCase_ ( _a ):
    """Windows backend: advisory locking via ``msvcrt.locking``."""

    def __init__( self : Any , snake_case_ : Optional[Any] , snake_case_ : Dict=-1 , snake_case_ : Optional[int]=None ):
        from .file_utils import relative_to_absolute_path

        super().__init__(snake_case_ , timeout=snake_case_ , max_filename_length=snake_case_ )
        # Extended-length path prefix lifts the MAX_PATH limit on Windows.
        snake_case__ : List[Any] = """\\\\?\\""" + relative_to_absolute_path(self.lock_file )

    def lowerCamelCase ( self : Any ):
        snake_case__ : Dict = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            snake_case__ : str = os.open(self._lock_file , snake_case_ )
        except OSError:
            pass
        else:
            try:
                # Non-blocking exclusive byte-range lock on the first byte.
                msvcrt.locking(snake_case_ , msvcrt.LK_NBLCK , 1 )
            except OSError:
                os.close(snake_case_ )
            else:
                snake_case__ : List[Any] = fd
        return None

    def lowerCamelCase ( self : Union[str, Any] ):
        snake_case__ : Union[str, Any] = self._lock_file_fd
        snake_case__ : Tuple = None
        msvcrt.locking(snake_case_ , msvcrt.LK_UNLCK , 1 )
        os.close(snake_case_ )
        try:
            os.remove(self._lock_file )
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None


class UpperCAmelCase_ ( _a ):
    """Unix backend: advisory locking via ``fcntl.flock``."""

    def __init__( self : int , snake_case_ : List[str] , snake_case_ : Optional[Any]=-1 , snake_case_ : Optional[Any]=None ):
        # Use the real per-filesystem filename limit instead of the 255 default.
        snake_case__ : Union[str, Any] = os.statvfs(os.path.dirname(snake_case_ ) ).f_namemax
        super().__init__(snake_case_ , timeout=snake_case_ , max_filename_length=snake_case_ )

    def lowerCamelCase ( self : int ):
        snake_case__ : Tuple = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        snake_case__ : Dict = os.open(self._lock_file , snake_case_ )
        try:
            # Non-blocking exclusive flock; failure means someone else holds it.
            fcntl.flock(snake_case_ , fcntl.LOCK_EX | fcntl.LOCK_NB )
        except OSError:
            os.close(snake_case_ )
        else:
            snake_case__ : List[Any] = fd
        return None

    def lowerCamelCase ( self : int ):
        # Do not remove the lockfile:
        #
        #   https://github.com/benediktschmitt/py-filelock/issues/31
        #   https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        snake_case__ : Union[str, Any] = self._lock_file_fd
        snake_case__ : Optional[Any] = None
        fcntl.flock(snake_case_ , fcntl.LOCK_UN )
        os.close(snake_case_ )
        return None


class UpperCAmelCase_ ( _a ):
    """Portable fallback: existence of the lock file itself is the lock
    (created with O_EXCL so creation is atomic)."""

    def lowerCamelCase ( self : Dict ):
        snake_case__ : Tuple = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            snake_case__ : Any = os.open(self._lock_file , snake_case_ )
        except OSError:
            pass
        else:
            snake_case__ : str = fd
        return None

    def lowerCamelCase ( self : Optional[Any] ):
        os.close(self._lock_file_fd )
        snake_case__ : str = None
        try:
            os.remove(self._lock_file )
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


# Pick the platform backend; warn when only the (weaker) soft lock is usable.
__a = None

if msvcrt:
    __a = WindowsFileLock
elif fcntl:
    __a = UnixFileLock
else:
    __a = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
35
"""Utilities to inspect Hugging Face model test files.

Given a path like ``tests/models/bert/test_modeling_bert.py``, these helpers
import the module and build mappings between test classes, model classes and
model-tester classes.

Fixes vs. the previous revision: all twelve helpers were defined under the
same name (``__snake_case``) so each definition shadowed the previous one
while the bodies called the real names; the sort keys used
``lambda _lowerCAmelCase: x.__name__`` (``x`` undefined); and ``to_json``
tested ``isinstance(o, o)`` instead of real types.
"""
import importlib
import os
import sys

# This is required to make the module import work (when the python process is
# running from the root of the repo).
sys.path.append(".")


def get_module_path(test_file):
    """Return the dotted module path for a ``tests/models/.../test_modeling_*.py`` file.

    Raises:
        ValueError: if `test_file` is not a ``test_modeling_*.py`` file under ``tests/models``.
    """
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    """Import and return the test module corresponding to `test_file`."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Return all ``*ModelTester`` classes defined in the test module, sorted by name."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Return all test classes in the module, sorted by name.

    A "test class" is any attribute exposing a non-empty ``all_model_classes``.
    """
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        test_class = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(test_class)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Return the union of model classes covered by the test classes in `test_file`, sorted by name."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Instantiate `test_class` (running ``setUp`` when present) and return the class of its ``model_tester``, or None."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Return the test classes in `test_file` that cover `model_class`, sorted by name."""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Return the model-tester classes used by the tests of `model_class`, sorted by name."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Map each test class in `test_file` to its model-tester class."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Map each model class in `test_file` to the test classes covering it."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Map each model class in `test_file` to its model-tester classes."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Recursively replace classes by their names so the structure is JSON-serializable."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
35
1
"""Logging and progress-bar utilities for the `datasets` library.

Exposes the usual ``get_logger`` / ``set_verbosity_*`` helpers plus a ``tqdm``
proxy that can be globally disabled.

Fixes vs. the previous revision: every helper was defined under the same name
(``__snake_case``) while the bodies called the real names; ``os.getenv`` was
given an undefined default; the verbosity shortcuts called ``set_verbosity``
with an undefined argument; and the propagation / progress-bar toggles bound a
throwaway local instead of mutating the logger / the ``_tqdm_active`` global.
"""
import logging
import os
from logging import (
    CRITICAL,  # NOQA
    DEBUG,  # NOQA
    ERROR,  # NOQA
    FATAL,  # NOQA
    INFO,  # NOQA
    NOTSET,  # NOQA
    WARN,  # NOQA
    WARNING,  # NOQA
)
from typing import Optional

from tqdm import auto as tqdm_lib


# Mapping of DATASETS_VERBOSITY env values to logging levels.
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING


def _get_default_logging_level():
    """Return the default level, honouring the ``DATASETS_VERBOSITY`` env var.

    Unknown values are reported and the built-in default is used instead.
    """
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    """Top-level package name, used as the root logger name."""
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name, defaulting to the library root logger."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the effective level of the library root logger."""
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the level of the library root logger."""
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    """Set the verbosity to ``INFO``."""
    return set_verbosity(INFO)


def set_verbosity_warning():
    """Set the verbosity to ``WARNING``."""
    return set_verbosity(WARNING)


def set_verbosity_debug():
    """Set the verbosity to ``DEBUG``."""
    return set_verbosity(DEBUG)


def set_verbosity_error():
    """Set the verbosity to ``ERROR``."""
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    """Stop library log records from propagating to ancestor loggers."""
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Let library log records propagate to ancestor loggers again."""
    _get_library_root_logger().propagate = True


# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()


class EmptyTqdm:
    """Dummy tqdm that swallows every call but still iterates its input."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function for any attribute, so tqdm method calls are no-ops."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


# Global switch for progress bars; toggled by (en|dis)able_progress_bar().
_tqdm_active = True


class _tqdm_cls:
    """Callable that dispatches to real tqdm or to ``EmptyTqdm`` depending on
    the global ``_tqdm_active`` flag."""

    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return whether progress bars are currently enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    """Enable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    """Disable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = False
35
"""Convert a timm Swin image-classification checkpoint to the HuggingFace
SwinForImageClassification format and verify logits match on a COCO image.

NOTE(review): throughout this script, results are assigned to throwaway names
(``snake_case__``) while later statements read the originally-named variables
(``name_split``, ``key_split``, ``val`` ...), which are undefined at runtime;
in ``convert_state_dict`` the original ``orig_state_dict[f"..."]`` assignment
targets were lost entirely. This looks like damage from an automated rename —
verify against the upstream transformers conversion script before running.
"""
import argparse
import json

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification


def __snake_case( _lowerCAmelCase ) -> List[Any]:
    """Build a SwinConfig from the timm model name (e.g. ``swin_tiny_patch4_window7_224``)."""
    snake_case__ : Dict = SwinConfig()
    snake_case__ : Optional[Any] = swin_name.split("""_""" )
    # Model size, image size and window size are encoded in the timm name.
    snake_case__ : Any = name_split[1]
    snake_case__ : List[Any] = int(name_split[4] )
    snake_case__ : int = int(name_split[3][-1] )
    # Per-size architecture hyperparameters (embed dim, depths, attention heads).
    if model_size == "tiny":
        snake_case__ : List[Any] = 96
        snake_case__ : int = (2, 2, 6, 2)
        snake_case__ : int = (3, 6, 12, 24)
    elif model_size == "small":
        snake_case__ : Union[str, Any] = 96
        snake_case__ : Optional[Any] = (2, 2, 18, 2)
        snake_case__ : str = (3, 6, 12, 24)
    elif model_size == "base":
        snake_case__ : Dict = 128
        snake_case__ : str = (2, 2, 18, 2)
        snake_case__ : Dict = (4, 8, 16, 32)
    else:
        snake_case__ : List[str] = 192
        snake_case__ : str = (2, 2, 18, 2)
        snake_case__ : List[Any] = (6, 12, 24, 48)
    # ImageNet-22k checkpoints have 21841 classes; 1k checkpoints get the label file.
    if "in22k" in swin_name:
        snake_case__ : str = 21_841
    else:
        snake_case__ : List[str] = 1_000
        snake_case__ : int = """huggingface/label-files"""
        snake_case__ : Any = """imagenet-1k-id2label.json"""
        snake_case__ : List[Any] = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
        snake_case__ : Dict = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
        snake_case__ : Optional[int] = idalabel
        snake_case__ : List[Any] = {v: k for k, v in idalabel.items()}
    snake_case__ : List[Any] = img_size
    snake_case__ : Dict = num_classes
    snake_case__ : Dict = embed_dim
    snake_case__ : Optional[int] = depths
    snake_case__ : int = num_heads
    snake_case__ : Optional[int] = window_size
    return config


def __snake_case( _lowerCAmelCase ) -> Dict:
    """Translate a single timm state-dict key to its HF Swin equivalent."""
    if "patch_embed.proj" in name:
        snake_case__ : List[str] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
    if "patch_embed.norm" in name:
        snake_case__ : int = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
    if "layers" in name:
        snake_case__ : str = """encoder.""" + name
    if "attn.proj" in name:
        snake_case__ : List[str] = name.replace("""attn.proj""" , """attention.output.dense""" )
    if "attn" in name:
        snake_case__ : Tuple = name.replace("""attn""" , """attention.self""" )
    if "norm1" in name:
        snake_case__ : List[str] = name.replace("""norm1""" , """layernorm_before""" )
    if "norm2" in name:
        snake_case__ : Optional[Any] = name.replace("""norm2""" , """layernorm_after""" )
    if "mlp.fc1" in name:
        snake_case__ : Union[str, Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
    if "mlp.fc2" in name:
        snake_case__ : Dict = name.replace("""mlp.fc2""" , """output.dense""" )
    if name == "norm.weight":
        snake_case__ : Tuple = """layernorm.weight"""
    if name == "norm.bias":
        snake_case__ : Union[str, Any] = """layernorm.bias"""
    # Classifier head keeps its own prefix; everything else is nested under "swin.".
    if "head" in name:
        snake_case__ : Optional[int] = name.replace("""head""" , """classifier""" )
    else:
        snake_case__ : List[str] = """swin.""" + name
    return name


def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
    """Rename timm state-dict keys and split fused qkv weights into separate q/k/v tensors."""
    for key in orig_state_dict.copy().keys():
        snake_case__ : Optional[int] = orig_state_dict.pop(_lowerCAmelCase )
        if "mask" in key:
            # Relative-position masks are buffers the HF model recomputes; drop them.
            continue
        elif "qkv" in key:
            snake_case__ : Dict = key.split(""".""" )
            snake_case__ : Optional[int] = int(key_split[1] )
            snake_case__ : Union[str, Any] = int(key_split[3] )
            # all_head_size gives the per-projection width of the fused qkv tensor.
            snake_case__ : List[Any] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            # NOTE(review): the original assignments into orig_state_dict[...] were
            # lost by the renaming; only the right-hand slices remain.
            if "weight" in key:
                snake_case__ : Optional[Any] = val[:dim, :]
                snake_case__ : Tuple = val[dim : dim * 2, :]
                snake_case__ : Dict = val[-dim:, :]
            else:
                snake_case__ : Tuple = val[:dim]
                snake_case__ : int = val[dim : dim * 2]
                snake_case__ : int = val[-dim:]
        else:
            snake_case__ : Union[str, Any] = val
    return orig_state_dict


def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> int:
    """Convert the timm checkpoint, verify logits against the timm model, and save."""
    snake_case__ : Optional[int] = timm.create_model(_lowerCAmelCase , pretrained=_lowerCAmelCase )
    timm_model.eval()
    snake_case__ : Optional[int] = get_swin_config(_lowerCAmelCase )
    snake_case__ : Optional[Any] = SwinForImageClassification(_lowerCAmelCase )
    model.eval()
    snake_case__ : str = convert_state_dict(timm_model.state_dict() , _lowerCAmelCase )
    model.load_state_dict(_lowerCAmelCase )
    # Sanity-check on a COCO image: both models must agree within 1e-3.
    snake_case__ : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    snake_case__ : Dict = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swin_name.replace("""_""" , """-""" ) ) )
    snake_case__ : Dict = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
    snake_case__ : Optional[int] = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" )
    snake_case__ : Optional[Any] = timm_model(inputs["""pixel_values"""] )
    snake_case__ : str = model(**_lowerCAmelCase ).logits
    assert torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 )
    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(_lowerCAmelCase )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(_lowerCAmelCase )


if __name__ == "__main__":
    __a = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swin_name",
        default="swin_tiny_patch4_window7_224",
        type=str,
        help="Name of the Swin timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    __a = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
35
1
'''simple docstring''' __a = { "joule": 1.0, "kilojoule": 1000, "megajoule": 100_0000, "gigajoule": 10_0000_0000, "wattsecond": 1.0, "watthour": 3600, "kilowatthour": 360_0000, "newtonmeter": 1.0, "calorie_nutr": 4186.8, "kilocalorie_nutr": 418_6800.00, "electronvolt": 1.602176634E-19, "britishthermalunit_it": 1055.0_5585, "footpound": 1.355818, } def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> float: if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION: snake_case__ : Optional[int] = ( f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n" f"Valid values are: {', '.join(_lowerCAmelCase )}" ) raise ValueError(_lowerCAmelCase ) return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type] if __name__ == "__main__": import doctest doctest.testmod()
35
'''simple docstring''' import warnings from ...utils import logging from .image_processing_beit import BeitImageProcessor __a = logging.get_logger(__name__) class UpperCAmelCase_ ( _a ): """simple docstring""" def __init__( self : List[str] , *snake_case_ : str , **snake_case_ : List[str] ): warnings.warn( """The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use BeitImageProcessor instead.""" , snake_case_ , ) super().__init__(*snake_case_ , **snake_case_ )
35
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __a = { "configuration_jukebox": [ "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "JukeboxConfig", "JukeboxPriorConfig", "JukeboxVQVAEConfig", ], "tokenization_jukebox": ["JukeboxTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST", "JukeboxModel", "JukeboxPreTrainedModel", "JukeboxVQVAE", "JukeboxPrior", ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys __a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
35
'''simple docstring''' import logging from dataclasses import dataclass, field from pathlib import Path from typing import Optional, Union from .generation.configuration_utils import GenerationConfig from .training_args import TrainingArguments from .utils import add_start_docstrings __a = logging.getLogger(__name__) @dataclass @add_start_docstrings(TrainingArguments.__doc__ ) class UpperCAmelCase_ ( _a ): """simple docstring""" lowercase = field(default=_a , metadata={"help": "Whether to use SortishSampler or not."} ) lowercase = field( default=_a , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} ) lowercase = field( default=_a , metadata={ "help": ( "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default " "to the `max_length` value of the model configuration." ) } , ) lowercase = field( default=_a , metadata={ "help": ( "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default " "to the `num_beams` value of the model configuration." ) } , ) lowercase = field( default=_a , metadata={ "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction." } , ) def lowerCamelCase ( self : List[str] ): snake_case__ : int = super().to_dict() for k, v in d.items(): if isinstance(snake_case_ , snake_case_ ): snake_case__ : Optional[int] = v.to_dict() return d
35
1
"""Configuration for MaskFormer: a backbone config (Swin/ResNet) plus a DETR
decoder config, together with the loss weights of the Hungarian matcher.

Fixes vs. the previous revision: every ``__init__`` parameter shared the name
``snake_case_`` (a SyntaxError), the attribute assignments were bound to
throwaway ``snake_case__`` locals, and both methods were named
``lowerCamelCase`` so the second shadowed the first.
"""
import copy
from typing import Dict, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig


MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)


class UpperCAmelCase_ ( _a ):
    """Configuration class holding backbone + decoder sub-configs and
    MaskFormer's matcher/loss hyperparameters."""

    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            # Config passed as a plain dict: resolve the concrete config class.
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        # Mirror the decoder's dimensions so generic code can read them here.
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        """Instantiate from already-built backbone and decoder configs."""
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self):
        """Serialize, expanding the nested sub-configs to plain dicts."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
35
"""Convert a timm DeiT checkpoint to the HuggingFace
DeiTForImageClassificationWithTeacher format and verify logits match.

NOTE(review): throughout this script, results are assigned to throwaway names
(``snake_case__``) while later statements read the originally-named variables
(``rename_keys``, ``in_proj_weight``, ``config`` ...), which are undefined at
runtime; several state-dict assignment targets were lost entirely. This looks
like damage from an automated rename — verify against the upstream
transformers conversion script before running.
"""
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
__a = logging.get_logger(__name__)


def __snake_case( _lowerCAmelCase , _lowerCAmelCase=False ) -> str:
    """Build the (timm key, HF key) rename pairs for every encoder layer plus
    the embedding / head tensors."""
    snake_case__ : Union[str, Any] = []
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight") )
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias") )
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight") )
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias") )
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight") )
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias") )
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight") )
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias") )
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight") )
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias") )
    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("""cls_token""", """deit.embeddings.cls_token"""),
            ("""dist_token""", """deit.embeddings.distillation_token"""),
            ("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
            ("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
            ("""pos_embed""", """deit.embeddings.position_embeddings"""),
        ]
    )
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("""norm.weight""", """layernorm.weight"""),
                ("""norm.bias""", """layernorm.bias"""),
                ("""pre_logits.fc.weight""", """pooler.dense.weight"""),
                ("""pre_logits.fc.bias""", """pooler.dense.bias"""),
            ]
        )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        snake_case__ : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("""norm.weight""", """deit.layernorm.weight"""),
                ("""norm.bias""", """deit.layernorm.bias"""),
                ("""head.weight""", """cls_classifier.weight"""),
                ("""head.bias""", """cls_classifier.bias"""),
                ("""head_dist.weight""", """distillation_classifier.weight"""),
                ("""head_dist.bias""", """distillation_classifier.bias"""),
            ]
        )
    return rename_keys


def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> Union[str, Any]:
    """Split each layer's fused timm qkv matrix/bias into separate HF query, key and value tensors."""
    for i in range(config.num_hidden_layers ):
        if base_model:
            snake_case__ : Tuple = """"""
        else:
            snake_case__ : Dict = """deit."""
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        snake_case__ : Optional[Any] = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
        snake_case__ : Tuple = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        # NOTE(review): the original state_dict[...] assignment targets were
        # lost by the renaming; only the right-hand slices remain.
        snake_case__ : Any = in_proj_weight[: config.hidden_size, :]
        snake_case__ : Optional[int] = in_proj_bias[: config.hidden_size]
        snake_case__ : Any = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        snake_case__ : str = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        snake_case__ : List[str] = in_proj_weight[-config.hidden_size :, :]
        snake_case__ : Tuple = in_proj_bias[-config.hidden_size :]


def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int:
    """Move one entry of the state dict from the old key to the new key."""
    snake_case__ : str = dct.pop(_lowerCAmelCase )
    snake_case__ : Tuple = val


def __snake_case( ) -> Tuple:
    """Download and return the standard COCO cats test image used for verification."""
    snake_case__ : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    snake_case__ : Optional[int] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
    return im


@torch.no_grad()
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> str:
    """Copy/paste/tweak the timm DeiT weights into the HF format, verify the
    logits agree on a test image, and save model + image processor."""
    snake_case__ : Optional[int] = DeiTConfig()
    # all deit models have fine-tuned heads
    snake_case__ : Union[str, Any] = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    snake_case__ : int = 1_000
    snake_case__ : Any = """huggingface/label-files"""
    snake_case__ : Optional[Any] = """imagenet-1k-id2label.json"""
    snake_case__ : Tuple = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
    snake_case__ : List[Any] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
    snake_case__ : List[Any] = idalabel
    snake_case__ : List[str] = {v: k for k, v in idalabel.items()}
    # patch_size / image_size are encoded in the trailing characters of the name.
    snake_case__ : Tuple = int(deit_name[-6:-4] )
    snake_case__ : Optional[Any] = int(deit_name[-3:] )
    # size of the architecture
    if deit_name[9:].startswith("""tiny""" ):
        snake_case__ : Tuple = 192
        snake_case__ : Union[str, Any] = 768
        snake_case__ : Tuple = 12
        snake_case__ : Union[str, Any] = 3
    elif deit_name[9:].startswith("""small""" ):
        snake_case__ : str = 384
        snake_case__ : Any = 1_536
        snake_case__ : str = 12
        snake_case__ : int = 6
    if deit_name[9:].startswith("""base""" ):
        # "base" keeps the DeiTConfig defaults.
        pass
    elif deit_name[4:].startswith("""large""" ):
        snake_case__ : Union[str, Any] = 1_024
        snake_case__ : Any = 4_096
        snake_case__ : List[Any] = 24
        snake_case__ : Tuple = 16
    # load original model from timm
    snake_case__ : List[Any] = timm.create_model(_lowerCAmelCase , pretrained=_lowerCAmelCase )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    snake_case__ : Optional[Any] = timm_model.state_dict()
    snake_case__ : Optional[int] = create_rename_keys(_lowerCAmelCase , _lowerCAmelCase )
    for src, dest in rename_keys:
        rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
    read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
    # load HuggingFace model
    snake_case__ : Optional[Any] = DeiTForImageClassificationWithTeacher(_lowerCAmelCase ).eval()
    model.load_state_dict(_lowerCAmelCase )
    # Check outputs on an image, prepared by DeiTImageProcessor
    snake_case__ : List[Any] = int(
        (256 / 224) * config.image_size )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    snake_case__ : Optional[Any] = DeiTImageProcessor(size=_lowerCAmelCase , crop_size=config.image_size )
    snake_case__ : str = image_processor(images=prepare_img() , return_tensors="""pt""" )
    snake_case__ : Optional[Any] = encoding["""pixel_values"""]
    snake_case__ : Tuple = model(_lowerCAmelCase )
    snake_case__ : Optional[int] = timm_model(_lowerCAmelCase )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(_lowerCAmelCase , outputs.logits , atol=1e-3 )
    Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(_lowerCAmelCase )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(_lowerCAmelCase )


if __name__ == "__main__":
    __a = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--deit_name",
        default="vit_deit_base_distilled_patch16_224",
        type=str,
        help="Name of the DeiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    __a = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
35
1
'''simple docstring'''
# Tests for the unconditional Latent Diffusion (LDM) pipeline: a fast test with tiny
# dummy modules, and a @slow integration test against the released celebahq checkpoint.
# NOTE(review): identifiers were mechanically renamed — `snake_case__` targets are later
# read back under their original names (model, ldm, image, ...); restore before running.
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel

from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class UpperCAmelCase_ ( unittest.TestCase ):
    """simple docstring"""

    @property
    def lowerCamelCase ( self : Tuple ):
        # Tiny UNet; the fixed seed makes the weights reproducible across runs.
        torch.manual_seed(0 )
        snake_case__ : Dict = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
        return model

    @property
    def lowerCamelCase ( self : str ):
        # Tiny VQ-VAE used as the pipeline's latent codec.
        torch.manual_seed(0 )
        snake_case__ : Optional[int] = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
        return model

    @property
    def lowerCamelCase ( self : List[str] ):
        # Tiny CLIP text encoder (not used by the unconditional pipeline itself).
        torch.manual_seed(0 )
        snake_case__ : Dict = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        return CLIPTextModel(snake_case_ )

    def lowerCamelCase ( self : Optional[Any] ):
        # Runs the pipeline twice with the same seed (dict output vs tuple output) and
        # checks both against a pinned slice of expected pixel values.
        snake_case__ : List[str] = self.dummy_uncond_unet
        snake_case__ : int = DDIMScheduler()
        snake_case__ : Optional[Any] = self.dummy_vq_model

        snake_case__ : int = LDMPipeline(unet=snake_case_ , vqvae=snake_case_ , scheduler=snake_case_ )
        ldm.to(snake_case_ )
        ldm.set_progress_bar_config(disable=snake_case_ )

        snake_case__ : List[Any] = torch.manual_seed(0 )
        snake_case__ : Dict = ldm(generator=snake_case_ , num_inference_steps=2 , output_type="""numpy""" ).images

        snake_case__ : str = torch.manual_seed(0 )
        snake_case__ : List[Any] = ldm(generator=snake_case_ , num_inference_steps=2 , output_type="""numpy""" , return_dict=snake_case_ )[0]

        snake_case__ : List[Any] = image[0, -3:, -3:, -1]
        snake_case__ : List[Any] = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        snake_case__ : List[Any] = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
        # mps accumulates more numerical noise, hence the looser tolerance there
        snake_case__ : str = 1E-2 if torch_device != """mps""" else 3E-2

        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance


@slow
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
    """simple docstring"""

    def lowerCamelCase ( self : Any ):
        # Integration test: 5 DDIM steps with the released 256x256 celebahq checkpoint.
        snake_case__ : int = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
        ldm.to(snake_case_ )
        ldm.set_progress_bar_config(disable=snake_case_ )

        snake_case__ : Tuple = torch.manual_seed(0 )
        snake_case__ : Dict = ldm(generator=snake_case_ , num_inference_steps=5 , output_type="""numpy""" ).images

        snake_case__ : str = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        snake_case__ : Optional[int] = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447] )
        snake_case__ : Optional[Any] = 1E-2 if torch_device != """mps""" else 3E-2

        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
35
'''simple docstring''' import string from math import logaa def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> int: snake_case__ : List[str] = document.translate( str.maketrans("""""" , """""" , string.punctuation ) ).replace("""\n""" , """""" ) snake_case__ : List[str] = document_without_punctuation.split(""" """ ) # word tokenization return len([word for word in tokenize_document if word.lower() == term.lower()] ) def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> tuple[int, int]: snake_case__ : Dict = corpus.lower().translate( str.maketrans("""""" , """""" , string.punctuation ) ) # strip all punctuation and replace it with '' snake_case__ : Any = corpus_without_punctuation.split("""\n""" ) snake_case__ : int = term.lower() return (len([doc for doc in docs if term in doc] ), len(_lowerCAmelCase )) def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> float: if smoothing: if n == 0: raise ValueError("""log10(0) is undefined.""" ) return round(1 + logaa(n / (1 + df) ) , 3 ) if df == 0: raise ZeroDivisionError("""df must be > 0""" ) elif n == 0: raise ValueError("""log10(0) is undefined.""" ) return round(logaa(n / df ) , 3 ) def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> float: return round(tf * idf , 3 )
35
1
'''simple docstring''' from transformers import BertTokenizerFast from .custom_tokenization import CustomTokenizer class UpperCAmelCase_ ( _a ): """simple docstring""" lowercase = CustomTokenizer pass
35
'''simple docstring'''
# TF ResNet test suite: a model tester building tiny configs/inputs, the common
# model-test harness, and @slow integration checks against the released checkpoint.
# NOTE(review): identifiers were mechanically renamed — `__init__` takes only
# duplicated `snake_case_` parameters (a SyntaxError) while the bodies read the
# original names (parent, batch_size, ...); restore consistent names before use.
from __future__ import annotations

import inspect
import unittest

import numpy as np

from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFResNetForImageClassification, TFResNetModel
    from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class UpperCAmelCase_ :
    """simple docstring"""

    def __init__( self : int , snake_case_ : Tuple , snake_case_ : List[str]=3 , snake_case_ : Tuple=32 , snake_case_ : List[Any]=3 , snake_case_ : List[str]=10 , snake_case_ : List[str]=[10, 20, 30, 40] , snake_case_ : Tuple=[1, 1, 2, 1] , snake_case_ : Tuple=True , snake_case_ : str=True , snake_case_ : int="relu" , snake_case_ : List[Any]=3 , snake_case_ : str=None , ):
        snake_case__ : List[Any] = parent
        snake_case__ : List[Any] = batch_size
        snake_case__ : int = image_size
        snake_case__ : List[Any] = num_channels
        snake_case__ : Optional[Any] = embeddings_size
        snake_case__ : Optional[int] = hidden_sizes
        snake_case__ : Tuple = depths
        snake_case__ : Any = is_training
        snake_case__ : Optional[int] = use_labels
        snake_case__ : Optional[int] = hidden_act
        snake_case__ : Optional[int] = num_labels
        snake_case__ : int = scope
        snake_case__ : Tuple = len(snake_case_ )

    def lowerCamelCase ( self : Any ):
        # Random pixel inputs plus optional labels, together with a tiny config.
        snake_case__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        snake_case__ : Union[str, Any] = None
        if self.use_labels:
            snake_case__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )

        snake_case__ : List[str] = self.get_config()

        return config, pixel_values, labels

    def lowerCamelCase ( self : int ):
        return ResNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )

    def lowerCamelCase ( self : Tuple , snake_case_ : Tuple , snake_case_ : List[Any] , snake_case_ : Optional[int] ):
        snake_case__ : Optional[Any] = TFResNetModel(config=snake_case_ )
        snake_case__ : int = model(snake_case_ )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def lowerCamelCase ( self : List[str] , snake_case_ : List[str] , snake_case_ : str , snake_case_ : Union[str, Any] ):
        snake_case__ : str = self.num_labels
        snake_case__ : Optional[int] = TFResNetForImageClassification(snake_case_ )
        snake_case__ : Tuple = model(snake_case_ , labels=snake_case_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def lowerCamelCase ( self : Tuple ):
        snake_case__ : List[Any] = self.prepare_config_and_inputs()
        snake_case__ , snake_case__ , snake_case__ : str = config_and_inputs
        snake_case__ : int = {"""pixel_values""": pixel_values}
        return config, inputs_dict


@require_tf
class UpperCAmelCase_ ( _a , _a , unittest.TestCase ):
    """simple docstring"""

    # NOTE(review): these class attributes were all renamed to `lowercase`, so the
    # later assignments shadow the earlier ones; in the upstream suite each line
    # sets a distinct flag/attribute.
    lowercase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    lowercase = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    lowercase = False
    lowercase = False
    lowercase = False
    lowercase = False
    lowercase = False

    def lowerCamelCase ( self : Optional[int] ):
        snake_case__ : Tuple = TFResNetModelTester(self )
        snake_case__ : List[str] = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )

    def lowerCamelCase ( self : Dict ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def lowerCamelCase ( self : str ):
        return

    @unittest.skip(reason="""ResNet does not use inputs_embeds""" )
    def lowerCamelCase ( self : int ):
        pass

    @unittest.skip(reason="""ResNet does not support input and output embeddings""" )
    def lowerCamelCase ( self : List[Any] ):
        pass

    def lowerCamelCase ( self : List[Any] ):
        # The forward signature of every model class must start with `pixel_values`.
        snake_case__ , snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            snake_case__ : Dict = model_class(snake_case_ )
            snake_case__ : Optional[Any] = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case__ : Union[str, Any] = [*signature.parameters.keys()]

            snake_case__ : Optional[int] = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , snake_case_ )

    def lowerCamelCase ( self : Union[str, Any] ):
        snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case_ )

    def lowerCamelCase ( self : List[str] ):
        def check_hidden_states_output(snake_case_ : Any , snake_case_ : Any , snake_case_ : List[str] ):
            snake_case__ : List[Any] = model_class(snake_case_ )
            snake_case__ : Dict = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )

            snake_case__ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            snake_case__ : List[Any] = self.model_tester.num_stages
            self.assertEqual(len(snake_case_ ) , expected_num_stages + 1 )

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        snake_case__ , snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common()

        snake_case__ : List[Any] = ["""basic""", """bottleneck"""]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                snake_case__ : Dict = layer_type
                snake_case__ : Optional[int] = True
                check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                snake_case__ : List[Any] = True

                check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )

    def lowerCamelCase ( self : Optional[Any] ):
        snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*snake_case_ )

    @slow
    def lowerCamelCase ( self : Optional[Any] ):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case__ : str = TFResNetModel.from_pretrained(snake_case_ )
            self.assertIsNotNone(snake_case_ )


# Loads the fixture image used by the integration test below.
def __snake_case( ) -> Optional[int]:
    snake_case__ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image


@require_tf
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
    """simple docstring"""

    @cached_property
    def lowerCamelCase ( self : List[Any] ):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )

    @slow
    def lowerCamelCase ( self : Optional[int] ):
        snake_case__ : List[str] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )

        snake_case__ : List[Any] = self.default_image_processor
        snake_case__ : List[Any] = prepare_img()
        snake_case__ : List[str] = image_processor(images=snake_case_ , return_tensors="""tf""" )

        # forward pass
        snake_case__ : Optional[Any] = model(**snake_case_ )

        # verify the logits
        snake_case__ : Union[str, Any] = tf.TensorShape((1, 1_000) )
        self.assertEqual(outputs.logits.shape , snake_case_ )

        snake_case__ : List[str] = tf.constant([-11.1069, -9.7877, -8.3777] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , snake_case_ , atol=1E-4 ) )
35
1
'''simple docstring''' def __snake_case( _lowerCAmelCase ) -> int: if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or number < 0: raise ValueError("""Input must be a non-negative integer""" ) snake_case__ : Any = 0 while number: # This way we arrive at next set bit (next 1) instead of looping # through each bit and checking for 1s hence the # loop won't run 32 times it will only run the number of `1` times number &= number - 1 count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
35
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json", # See all GLPN models at https://huggingface.co/models?filter=glpn } class UpperCAmelCase_ ( _a ): """simple docstring""" lowercase = "glpn" def __init__( self : Optional[Any] , snake_case_ : List[str]=3 , snake_case_ : Dict=4 , snake_case_ : List[Any]=[2, 2, 2, 2] , snake_case_ : int=[8, 4, 2, 1] , snake_case_ : List[str]=[32, 64, 160, 256] , snake_case_ : Tuple=[7, 3, 3, 3] , snake_case_ : List[Any]=[4, 2, 2, 2] , snake_case_ : Tuple=[1, 2, 5, 8] , snake_case_ : List[str]=[4, 4, 4, 4] , snake_case_ : Optional[int]="gelu" , snake_case_ : Dict=0.0 , snake_case_ : Union[str, Any]=0.0 , snake_case_ : List[Any]=0.02 , snake_case_ : Tuple=0.1 , snake_case_ : Any=1E-6 , snake_case_ : Dict=64 , snake_case_ : Tuple=10 , snake_case_ : List[Any]=-1 , **snake_case_ : Optional[Any] , ): super().__init__(**snake_case_ ) snake_case__ : Optional[Any] = num_channels snake_case__ : Dict = num_encoder_blocks snake_case__ : Tuple = depths snake_case__ : Union[str, Any] = sr_ratios snake_case__ : Tuple = hidden_sizes snake_case__ : Optional[Any] = patch_sizes snake_case__ : int = strides snake_case__ : List[Any] = mlp_ratios snake_case__ : Optional[int] = num_attention_heads snake_case__ : Dict = hidden_act snake_case__ : int = hidden_dropout_prob snake_case__ : Optional[Any] = attention_probs_dropout_prob snake_case__ : str = initializer_range snake_case__ : List[str] = drop_path_rate snake_case__ : int = layer_norm_eps snake_case__ : Tuple = decoder_hidden_size snake_case__ : List[Any] = max_depth snake_case__ : Dict = head_in_index
35
1
'''simple docstring''' from __future__ import annotations def __snake_case( _lowerCAmelCase ) -> int: # preprocessing the first row for i in range(1 , len(matrix[0] ) ): matrix[0][i] += matrix[0][i - 1] # preprocessing the first column for i in range(1 , len(_lowerCAmelCase ) ): matrix[i][0] += matrix[i - 1][0] # updating the path cost for current position for i in range(1 , len(_lowerCAmelCase ) ): for j in range(1 , len(matrix[0] ) ): matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] ) return matrix[-1][-1] if __name__ == "__main__": import doctest doctest.testmod()
35
'''simple docstring'''
# Fast RoFormer tokenizer: wraps the Rust backend, swapping in a Jieba-based custom
# pre-tokenizer after (de)serialization and falling back to BertPreTokenizer when
# pickling/saving, because custom pre-tokenizers cannot be serialized.
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer


__a = logging.get_logger(__name__)

# NOTE(review): every module-level constant was renamed to `__a`, so each literal
# below shadows the previous one; upstream these are VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES and
# PRETRAINED_INIT_CONFIGURATION — the distinct names the class attributes reference.
__a = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

__a = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

__a = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

__a = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}


class UpperCAmelCase_ ( _a ):
    """simple docstring"""

    lowercase = VOCAB_FILES_NAMES
    lowercase = PRETRAINED_VOCAB_FILES_MAP
    lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowercase = PRETRAINED_INIT_CONFIGURATION
    lowercase = RoFormerTokenizer

    def __init__( self : List[Any] , snake_case_ : List[str]=None , snake_case_ : Dict=None , snake_case_ : Any=True , snake_case_ : str="[UNK]" , snake_case_ : List[str]="[SEP]" , snake_case_ : Optional[Any]="[PAD]" , snake_case_ : Union[str, Any]="[CLS]" , snake_case_ : Union[str, Any]="[MASK]" , snake_case_ : List[Any]=True , snake_case_ : Optional[Any]=None , **snake_case_ : Tuple , ):
        super().__init__(
            snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , tokenize_chinese_chars=snake_case_ , strip_accents=snake_case_ , **snake_case_ , )

        # Rebuild the backend normalizer only if its lowercase/strip_accents options
        # disagree with the requested ones.
        snake_case__ : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            pre_tok_state.get("""lowercase""" , snake_case_ ) != do_lower_case
            or pre_tok_state.get("""strip_accents""" , snake_case_ ) != strip_accents
        ):
            snake_case__ : str = getattr(snake_case_ , pre_tok_state.pop("""type""" ) )
            snake_case__ : Optional[int] = do_lower_case
            snake_case__ : Union[str, Any] = strip_accents
            snake_case__ : Union[str, Any] = pre_tok_class(**snake_case_ )

        snake_case__ : str = do_lower_case

    def __getstate__( self : int ):
        # Custom (Jieba) pre-tokenizers are not picklable: substitute the standard
        # BertPreTokenizer in the copied state.
        snake_case__ : List[Any] = self.__dict__.copy()
        snake_case__ : str = BertPreTokenizer()
        return state

    def __setstate__( self : Dict , snake_case_ : Dict ):
        # Restore the Jieba-based pre-tokenizer from the unpickled vocab.
        snake_case__ : List[Any] = d
        snake_case__ : Union[str, Any] = self.__dict__["""_tokenizer"""].get_vocab()
        snake_case__ : List[Any] = PreTokenizer.custom(JiebaPreTokenizer(snake_case_ ) )

    def lowerCamelCase ( self : str , snake_case_ : Optional[Any] , snake_case_ : List[str]=None ):
        # Build [CLS] A [SEP] (+ B [SEP]) input sequences.
        snake_case__ : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]

        return output

    def lowerCamelCase ( self : str , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
        # Token-type ids: 0 for the first segment (incl. specials), 1 for the second.
        snake_case__ : int = [self.sep_token_id]
        snake_case__ : str = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def lowerCamelCase ( self : Dict , snake_case_ : str , snake_case_ : Optional[str] = None ):
        snake_case__ : Union[str, Any] = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
        return tuple(snake_case_ )

    def lowerCamelCase ( self : Dict , snake_case_ : List[str] , snake_case_ : Tuple=None , snake_case_ : List[str]=None , snake_case_ : Union[str, Any]=False , **snake_case_ : Tuple , ):
        # Temporarily swap to the serializable BertPreTokenizer while saving.
        snake_case__ : Optional[Any] = BertPreTokenizer()
        return super().save_pretrained(snake_case_ , snake_case_ , snake_case_ , snake_case_ , **snake_case_ )
35
1
'''simple docstring'''
# Flax BEiT test suite: a model tester, the common Flax model-test harness (incl. a
# jit vs. non-jit equivalence check), and @slow integration tests with pinned logits.
# NOTE(review): identifiers were mechanically renamed — `__init__` takes only
# duplicated `snake_case_` parameters (a SyntaxError) while the bodies read the
# original names (parent, vocab_size, ...); restore consistent names before use.
import inspect
import unittest

import numpy as np

from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor


if is_flax_available():
    import jax

    from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel

if is_vision_available():
    from PIL import Image

    from transformers import BeitImageProcessor


class UpperCAmelCase_ ( unittest.TestCase ):
    """simple docstring"""

    def __init__( self : Optional[int] , snake_case_ : Union[str, Any] , snake_case_ : Any=100 , snake_case_ : List[str]=13 , snake_case_ : str=30 , snake_case_ : int=2 , snake_case_ : int=3 , snake_case_ : Union[str, Any]=True , snake_case_ : Any=True , snake_case_ : Union[str, Any]=32 , snake_case_ : Tuple=5 , snake_case_ : Optional[int]=4 , snake_case_ : Optional[Any]=37 , snake_case_ : List[Any]="gelu" , snake_case_ : str=0.1 , snake_case_ : Any=0.1 , snake_case_ : Optional[Any]=10 , snake_case_ : Union[str, Any]=0.02 , snake_case_ : Optional[Any]=3 , ):
        snake_case__ : Any = parent
        snake_case__ : List[str] = vocab_size
        snake_case__ : Optional[Any] = batch_size
        snake_case__ : Optional[int] = image_size
        snake_case__ : int = patch_size
        snake_case__ : List[Any] = num_channels
        snake_case__ : Optional[Any] = is_training
        snake_case__ : Dict = use_labels
        snake_case__ : Dict = hidden_size
        snake_case__ : Union[str, Any] = num_hidden_layers
        snake_case__ : Dict = num_attention_heads
        snake_case__ : str = intermediate_size
        snake_case__ : Optional[Any] = hidden_act
        snake_case__ : Optional[int] = hidden_dropout_prob
        snake_case__ : int = attention_probs_dropout_prob
        snake_case__ : List[Any] = type_sequence_label_size
        snake_case__ : str = initializer_range
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        snake_case__ : Dict = (image_size // patch_size) ** 2
        snake_case__ : Dict = num_patches + 1

    def lowerCamelCase ( self : Optional[Any] ):
        # Random pixel inputs plus optional labels and a tiny BEiT config.
        snake_case__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        snake_case__ : str = None
        if self.use_labels:
            snake_case__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )

        snake_case__ : str = BeitConfig(
            vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case_ , initializer_range=self.initializer_range , )

        return config, pixel_values, labels

    def lowerCamelCase ( self : Any , snake_case_ : Dict , snake_case_ : Tuple , snake_case_ : List[str] ):
        snake_case__ : Union[str, Any] = FlaxBeitModel(config=snake_case_ )
        snake_case__ : Optional[int] = model(snake_case_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCamelCase ( self : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : List[Any] , snake_case_ : Dict ):
        snake_case__ : str = FlaxBeitForMaskedImageModeling(config=snake_case_ )
        snake_case__ : List[str] = model(snake_case_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )

    def lowerCamelCase ( self : str , snake_case_ : List[Any] , snake_case_ : List[Any] , snake_case_ : Tuple ):
        snake_case__ : Dict = self.type_sequence_label_size
        snake_case__ : int = FlaxBeitForImageClassification(config=snake_case_ )
        snake_case__ : Optional[int] = model(snake_case_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

        # test greyscale images
        snake_case__ : str = 1
        snake_case__ : List[str] = FlaxBeitForImageClassification(snake_case_ )

        snake_case__ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        snake_case__ : int = model(snake_case_ )

    def lowerCamelCase ( self : Any ):
        snake_case__ : Optional[Any] = self.prepare_config_and_inputs()
        (
            (
                snake_case__
            ) ,
            (
                snake_case__
            ) ,
            (
                snake_case__
            ) ,
        ) : Tuple = config_and_inputs
        snake_case__ : List[str] = {"""pixel_values""": pixel_values}
        return config, inputs_dict


@require_flax
class UpperCAmelCase_ ( _a , unittest.TestCase ):
    """simple docstring"""

    lowercase = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )

    def lowerCamelCase ( self : List[str] ):
        snake_case__ : str = FlaxBeitModelTester(self )
        snake_case__ : Tuple = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=37 )

    def lowerCamelCase ( self : Union[str, Any] ):
        self.config_tester.run_common_tests()

    def lowerCamelCase ( self : Optional[int] ):
        # The forward signature of every model class must start with `pixel_values`.
        snake_case__ , snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            snake_case__ : int = model_class(snake_case_ )
            snake_case__ : int = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case__ : Union[str, Any] = [*signature.parameters.keys()]

            snake_case__ : Union[str, Any] = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , snake_case_ )

    def lowerCamelCase ( self : List[str] ):
        # Jitted and non-jitted forward passes must produce outputs of equal shapes.
        snake_case__ , snake_case__ : int = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                snake_case__ : str = self._prepare_for_class(snake_case_ , snake_case_ )
                snake_case__ : Optional[Any] = model_class(snake_case_ )

                @jax.jit
                def model_jitted(snake_case_ : List[Any] , **snake_case_ : Tuple ):
                    return model(pixel_values=snake_case_ , **snake_case_ )

                with self.subTest("""JIT Enabled""" ):
                    snake_case__ : Tuple = model_jitted(**snake_case_ ).to_tuple()

                with self.subTest("""JIT Disabled""" ):
                    with jax.disable_jit():
                        snake_case__ : Tuple = model_jitted(**snake_case_ ).to_tuple()

                self.assertEqual(len(snake_case_ ) , len(snake_case_ ) )
                for jitted_output, output in zip(snake_case_ , snake_case_ ):
                    self.assertEqual(jitted_output.shape , output.shape )

    def lowerCamelCase ( self : List[Any] ):
        snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case_ )

    def lowerCamelCase ( self : Optional[int] ):
        snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*snake_case_ )

    def lowerCamelCase ( self : Any ):
        snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*snake_case_ )

    @slow
    def lowerCamelCase ( self : Union[str, Any] ):
        for model_class_name in self.all_model_classes:
            snake_case__ : Union[str, Any] = model_class_name.from_pretrained("""microsoft/beit-base-patch16-224""" )
            snake_case__ : int = model(np.ones((1, 3, 224, 224) ) )
            self.assertIsNotNone(snake_case_ )


# Loads the fixture image used by the integration tests below.
def __snake_case( ) -> Optional[Any]:
    snake_case__ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image


@require_vision
@require_flax
class UpperCAmelCase_ ( unittest.TestCase ):
    """simple docstring"""

    @cached_property
    def lowerCamelCase ( self : Optional[int] ):
        return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None

    @slow
    def lowerCamelCase ( self : str ):
        snake_case__ : Optional[Any] = FlaxBeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" )

        snake_case__ : Tuple = self.default_image_processor
        snake_case__ : Optional[Any] = prepare_img()
        snake_case__ : Optional[int] = image_processor(images=snake_case_ , return_tensors="""np""" ).pixel_values

        # prepare bool_masked_pos
        snake_case__ : Any = np.ones((1, 196) , dtype=snake_case_ )

        # forward pass
        snake_case__ : Dict = model(pixel_values=snake_case_ , bool_masked_pos=snake_case_ )
        snake_case__ : List[str] = outputs.logits

        # verify the logits
        snake_case__ : Dict = (1, 196, 8_192)
        self.assertEqual(logits.shape , snake_case_ )

        snake_case__ : Any = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , snake_case_ , atol=1E-2 ) )

    @slow
    def lowerCamelCase ( self : Tuple ):
        snake_case__ : List[Any] = FlaxBeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" )

        snake_case__ : Optional[Any] = self.default_image_processor
        snake_case__ : Union[str, Any] = prepare_img()
        snake_case__ : Optional[Any] = image_processor(images=snake_case_ , return_tensors="""np""" )

        # forward pass
        snake_case__ : List[Any] = model(**snake_case_ )
        snake_case__ : Optional[Any] = outputs.logits

        # verify the logits
        snake_case__ : str = (1, 1_000)
        self.assertEqual(logits.shape , snake_case_ )

        snake_case__ : Any = np.array([-1.2385, -1.0987, -1.0108] )
        self.assertTrue(np.allclose(logits[0, :3] , snake_case_ , atol=1E-4 ) )

        snake_case__ : Union[str, Any] = 281
        self.assertEqual(logits.argmax(-1 ).item() , snake_case_ )

    @slow
    def lowerCamelCase ( self : Dict ):
        snake_case__ : List[str] = FlaxBeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" )

        snake_case__ : Optional[int] = self.default_image_processor
        snake_case__ : List[str] = prepare_img()
        snake_case__ : Optional[Any] = image_processor(images=snake_case_ , return_tensors="""np""" )

        # forward pass
        snake_case__ : Optional[int] = model(**snake_case_ )
        snake_case__ : int = outputs.logits

        # verify the logits
        snake_case__ : int = (1, 21_841)
        self.assertEqual(logits.shape , snake_case_ )

        snake_case__ : Optional[int] = np.array([1.6881, -0.2787, 0.5901] )
        self.assertTrue(np.allclose(logits[0, :3] , snake_case_ , atol=1E-4 ) )

        snake_case__ : Optional[int] = 2_396
        self.assertEqual(logits.argmax(-1 ).item() , snake_case_ )
35
"""Tests for `datasets.utils.filelock`: lock contention and long lock-file names."""

import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def test_filelock_timeout(tmpdir):
    """A second acquire on a held lock must raise Timeout after ~`timeout` s."""
    # NOTE(review): the garbled original defined both tests under one shadowed
    # name and referenced undefined `tmpdir`/`_start`; reconstructed here. The
    # parameter MUST be named `tmpdir` — it is the pytest fixture.
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01

    with lock1.acquire():
        _start = time.time()
        with pytest.raises(Timeout):
            lock2.acquire(timeout)
        # the failed acquire must have blocked for at least `timeout` seconds
        # (moved out of the `raises` block — it was unreachable after the raise)
        assert time.time() - _start > timeout


def test_filelock_long_filename(tmpdir):
    """Over-long lock-file names are shortened so the basename fits in 255 chars."""
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)  # the name was shortened
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            # zero timeout: fails immediately while lock1 is held
            lock2.acquire(0)
35
1
'''simple docstring''' from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCAmelCase_ : """simple docstring""" def __init__( self : int , snake_case_ : Tuple , snake_case_ : List[str]=3 , snake_case_ : Tuple=32 , snake_case_ : List[Any]=3 , snake_case_ : List[str]=10 , snake_case_ : List[str]=[10, 20, 30, 40] , snake_case_ : Tuple=[1, 1, 2, 1] , snake_case_ : Tuple=True , snake_case_ : str=True , snake_case_ : int="relu" , snake_case_ : List[Any]=3 , snake_case_ : str=None , ): snake_case__ : List[Any] = parent snake_case__ : List[Any] = batch_size snake_case__ : int = image_size snake_case__ : List[Any] = num_channels snake_case__ : Optional[Any] = embeddings_size snake_case__ : Optional[int] = hidden_sizes snake_case__ : Tuple = depths snake_case__ : Any = is_training snake_case__ : Optional[int] = use_labels snake_case__ : Optional[int] = hidden_act snake_case__ : Optional[int] = num_labels snake_case__ : int = scope snake_case__ : Tuple = len(snake_case_ ) def lowerCamelCase ( self : Any ): snake_case__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case__ : Union[str, Any] = None if self.use_labels: snake_case__ : Optional[Any] = ids_tensor([self.batch_size] , 
self.num_labels ) snake_case__ : List[str] = self.get_config() return config, pixel_values, labels def lowerCamelCase ( self : int ): return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def lowerCamelCase ( self : Tuple , snake_case_ : Tuple , snake_case_ : List[Any] , snake_case_ : Optional[int] ): snake_case__ : Optional[Any] = TFResNetModel(config=snake_case_ ) snake_case__ : int = model(snake_case_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCamelCase ( self : List[str] , snake_case_ : List[str] , snake_case_ : str , snake_case_ : Union[str, Any] ): snake_case__ : str = self.num_labels snake_case__ : Optional[int] = TFResNetForImageClassification(snake_case_ ) snake_case__ : Tuple = model(snake_case_ , labels=snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase ( self : Tuple ): snake_case__ : List[Any] = self.prepare_config_and_inputs() snake_case__ , snake_case__ , snake_case__ : str = config_and_inputs snake_case__ : int = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class UpperCAmelCase_ ( _a , _a , unittest.TestCase ): """simple docstring""" lowercase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () lowercase = ( {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification} if is_tf_available() else {} ) lowercase = False lowercase = False lowercase = False lowercase = False lowercase = False def lowerCamelCase ( self : Optional[int] ): snake_case__ : Tuple = TFResNetModelTester(self ) snake_case__ : List[str] = ConfigTester(self , config_class=snake_case_ , 
has_text_modality=snake_case_ ) def lowerCamelCase ( self : Dict ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase ( self : str ): return @unittest.skip(reason="""ResNet does not use inputs_embeds""" ) def lowerCamelCase ( self : int ): pass @unittest.skip(reason="""ResNet does not support input and output embeddings""" ) def lowerCamelCase ( self : List[Any] ): pass def lowerCamelCase ( self : List[Any] ): snake_case__ , snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : Dict = model_class(snake_case_ ) snake_case__ : Optional[Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case__ : Union[str, Any] = [*signature.parameters.keys()] snake_case__ : Optional[int] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , snake_case_ ) def lowerCamelCase ( self : Union[str, Any] ): snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case_ ) def lowerCamelCase ( self : List[str] ): def check_hidden_states_output(snake_case_ : Any , snake_case_ : Any , snake_case_ : List[str] ): snake_case__ : List[Any] = model_class(snake_case_ ) snake_case__ : Dict = model(**self._prepare_for_class(snake_case_ , snake_case_ ) ) snake_case__ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states snake_case__ : List[Any] = self.model_tester.num_stages self.assertEqual(len(snake_case_ ) , expected_num_stages + 1 ) # ResNet's feature maps are of 
shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) snake_case__ , snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ : List[Any] = ["""basic""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: snake_case__ : Dict = layer_type snake_case__ : Optional[int] = True check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case__ : List[Any] = True check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ) def lowerCamelCase ( self : Optional[Any] ): snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case_ ) @slow def lowerCamelCase ( self : Optional[Any] ): for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case__ : str = TFResNetModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) def __snake_case( ) -> Optional[int]: snake_case__ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" @cached_property def lowerCamelCase ( self : List[Any] ): return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCamelCase ( self : Optional[int] ): snake_case__ : List[str] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) snake_case__ : List[Any] = self.default_image_processor snake_case__ : List[Any] = prepare_img() snake_case__ : List[str] = image_processor(images=snake_case_ , return_tensors="""tf""" ) # forward pass snake_case__ : Optional[Any] = 
model(**snake_case_ ) # verify the logits snake_case__ : Union[str, Any] = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape , snake_case_ ) snake_case__ : List[str] = tf.constant([-11.1069, -9.7877, -8.3777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , snake_case_ , atol=1E-4 ) )
35
"""Sum of an arithmetic progression (A.P.) series."""


def sum_of_series(first_term: float, common_diff: float, num_of_terms: int) -> float:
    """Return the sum of ``num_of_terms`` terms of the A.P. that starts at
    ``first_term`` with common difference ``common_diff``.

    Uses the closed form S = n/2 * (2a + (n - 1) * d), so the result is
    always a float.

    >>> sum_of_series(1, 1, 10)
    55.0
    >>> sum_of_series(1, 10, 100)
    49600.0
    """
    # NOTE(review): restored the name referenced by the demo below — the
    # garbled original defined two functions under one shadowed name and
    # called an undefined ``sum_of_series``.
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total


def main() -> None:
    """Demo: print the sum of the first 10 natural numbers."""
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
35
1
"""Strand sort: repeatedly extract a sorted "strand" and merge it into the result."""

import operator


def strand_sort(arr: list, reverse: bool = False, solution: list = None) -> list:
    """Sort ``arr`` with strand sort.

    ``arr`` is consumed destructively (drained as strands are extracted).

    :param arr: list to sort
    :param reverse: sort in descending order when True
    :param solution: accumulator used by the recursive calls; callers omit it
    :return: the sorted list

    >>> strand_sort([4, 3, 5, 1, 2])
    [1, 2, 3, 4, 5]
    """
    # NOTE(review): restored the function name used by the recursive call and
    # the module-level asserts; the garbled original had duplicate parameter
    # names (a SyntaxError) and an undefined ``strand_sort`` reference.
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    # Pull one monotone "strand" out of the remaining items. Popping while
    # enumerating skips the element after each hit; leftovers are picked up
    # by the recursive call below.
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # Merge the strand into the partial solution.
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, existing in enumerate(solution):
                if not _operator(item, existing):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution


if __name__ == "__main__":
    assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
35
"""Frozen parameter-name tables for pipeline tests.

NOTE(review): every table below is bound to the same name ``__a`` — each
assignment overwrites the previous one, so only the final frozenset survives
at module level. Upstream each table presumably had its own constant name
(``*_PARAMS`` / ``*_BATCH_PARAMS`` pairs); restoring those names needs the
original source and is flagged here rather than guessed.
"""

# prompt + image-size + guidance arguments (with a batch variant just below)
__a = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)
__a = frozenset(["prompt", "negative_prompt"])
# empty set: no required arguments
__a = frozenset([])
__a = frozenset(["image"])
# image input + size/guidance arguments
__a = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)
__a = frozenset(["image"])
# prompt + image variation arguments
__a = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)
__a = frozenset(["prompt", "image", "negative_prompt"])
__a = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)
__a = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
__a = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)
__a = frozenset(["image", "mask_image"])
# example-guided inpainting arguments
__a = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)
__a = frozenset(["example_image", "image", "mask_image"])
# class-conditioned generation
__a = frozenset(["class_labels"])
__a = frozenset(["class_labels"])
# unconditional generation (batch variant is empty)
__a = frozenset(["batch_size"])
__a = frozenset([])
__a = frozenset(["batch_size"])
__a = frozenset([])
# prompt + audio-length + guidance arguments
__a = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)
__a = frozenset(["prompt", "negative_prompt"])
# token-conditioned audio generation
__a = frozenset(["input_tokens"])
__a = frozenset(["input_tokens"])
35
1
"""Lazy import structure for the MaskFormer model family."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Submodule name -> public names, consumed lazily by _LazyModule below.
# NOTE(review): the garbled original collapsed ``_import_structure`` and the
# final ``sys.modules[__name__] = ...`` assignment into rebindings of ``__a``,
# leaving ``_import_structure`` undefined at the _LazyModule call; restored
# from the surviving reference on the last line.
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; at runtime the module is lazy.
    from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
    from .configuration_maskformer_swin import MaskFormerSwinConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_maskformer import MaskFormerFeatureExtractor
        from .image_processing_maskformer import MaskFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_maskformer import (
            MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaskFormerForInstanceSegmentation,
            MaskFormerModel,
            MaskFormerPreTrainedModel,
        )
        from .modeling_maskformer_swin import (
            MaskFormerSwinBackbone,
            MaskFormerSwinModel,
            MaskFormerSwinPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
35
'''simple docstring''' import json import os import unittest from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import ( VOCAB_FILES_NAMES, GPTSanJapaneseTokenizer, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase_ ( _a , unittest.TestCase ): """simple docstring""" lowercase = GPTSanJapaneseTokenizer lowercase = False lowercase = {"do_clean_text": False, "add_prefix_space": False} def lowerCamelCase ( self : str ): super().setUp() # fmt: off snake_case__ : Optional[Any] = ["""ใ“ใ‚“""", """ใ“ใ‚“ใซ""", """ใซใกใฏ""", """ใฐใ‚“ใฏ""", """ไธ–็•Œ,ใ”บ็•Œ""", """ใ€""", """ใ€‚""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""] # fmt: on snake_case__ : int = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # ๐Ÿ˜€ snake_case__ : List[Any] = {"""unk_token""": """<unk>"""} snake_case__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) snake_case__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) with open(self.emoji_file , """w""" ) as emoji_writer: emoji_writer.write(json.dumps(snake_case_ ) ) def lowerCamelCase ( self : Any , **snake_case_ : Union[str, Any] ): kwargs.update(self.special_tokens_map ) return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **snake_case_ ) def lowerCamelCase ( self : Any , snake_case_ : str ): snake_case__ : Union[str, Any] = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ \nใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚๐Ÿ˜€""" snake_case__ : List[str] = 
"""ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ \nใ“ใ‚“ใฐใ‚“ใฏใ€ไธ–็•Œใ€‚๐Ÿ˜€""" return input_text, output_text def lowerCamelCase ( self : Any , snake_case_ : Dict ): snake_case__ , snake_case__ : int = self.get_input_output_texts(snake_case_ ) snake_case__ : int = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ ) snake_case__ : List[str] = tokenizer.decode(snake_case_ , clean_up_tokenization_spaces=snake_case_ ) return text, ids def lowerCamelCase ( self : Optional[Any] ): pass # TODO add if relevant def lowerCamelCase ( self : Union[str, Any] ): pass # TODO add if relevant def lowerCamelCase ( self : List[str] ): pass # TODO add if relevant def lowerCamelCase ( self : Dict ): snake_case__ : Optional[Any] = self.get_tokenizer() # Testing tokenization snake_case__ : int = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ใ€€ใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚""" snake_case__ : Optional[int] = ["""ใ“ใ‚“""", """ใซใกใฏ""", """ใ€""", """ไธ–็•Œ""", """ใ€‚""", """<SP>""", """ใ“ใ‚“""", """ใฐใ‚“ใฏ""", """ใ€""", """ใ”บ็•Œ""", """ใ€‚"""] snake_case__ : Dict = tokenizer.tokenize(snake_case_ ) self.assertListEqual(snake_case_ , snake_case_ ) # Testing conversion to ids without special tokens snake_case__ : Union[str, Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] snake_case__ : List[Any] = tokenizer.convert_tokens_to_ids(snake_case_ ) self.assertListEqual(snake_case_ , snake_case_ ) # Testing conversion to ids with special tokens snake_case__ : Union[str, Any] = tokens + [tokenizer.unk_token] snake_case__ : Dict = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19] snake_case__ : Any = tokenizer.convert_tokens_to_ids(snake_case_ ) self.assertListEqual(snake_case_ , snake_case_ ) def lowerCamelCase ( self : Optional[Any] ): snake_case__ : Union[str, Any] = self.get_tokenizer() # Testing tokenization snake_case__ : Union[str, Any] = """ใ“ใ‚“ใซใกใฏใ€<|bagoftoken|>ไธ–็•Œใ€‚ใ“ใ‚“ใฐใ‚“ใฏใ€<|bagoftoken|>ใ”บ็•Œใ€‚""" snake_case__ : Optional[int] = 
"""ใ“ใ‚“ใซใกใฏใ€ใ€ใ€ใ€ไธ–็•Œใ€‚ใ“ใ‚“ใฐใ‚“ใฏใ€ใ€ใ€ใ€ไธ–็•Œใ€‚""" snake_case__ : Any = tokenizer.encode(snake_case_ ) snake_case__ : int = tokenizer.decode(snake_case_ ) self.assertEqual(snake_case_ , snake_case_ ) @slow def lowerCamelCase ( self : Union[str, Any] ): snake_case__ : Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) # Testing tokenization snake_case__ : Tuple = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚""" snake_case__ : Optional[Any] = """ใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚๐Ÿ˜€""" snake_case__ : List[str] = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚ใ“ใ‚“ใฐใ‚“ใฏใ€ไธ–็•Œใ€‚๐Ÿ˜€""" snake_case__ : Dict = tokenizer.encode(prefix_text + input_text ) snake_case__ : Dict = tokenizer.encode("""""" , prefix_text=prefix_text + input_text ) snake_case__ : int = tokenizer.encode(snake_case_ , prefix_text=snake_case_ ) snake_case__ : Optional[Any] = tokenizer.decode(snake_case_ ) snake_case__ : Union[str, Any] = tokenizer.decode(snake_case_ ) snake_case__ : str = tokenizer.decode(snake_case_ ) self.assertEqual(snake_case_ , snake_case_ ) self.assertEqual(snake_case_ , snake_case_ ) self.assertEqual(snake_case_ , snake_case_ ) @slow def lowerCamelCase ( self : Union[str, Any] ): snake_case__ : Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) # Testing tokenization snake_case__ : Dict = """ใ“ใ‚“ใซใกใฏใ€ไธ–็•Œใ€‚""" snake_case__ : Optional[int] = """ใ“ใ‚“ใฐใ‚“ใฏใ€ใ”บ็•Œใ€‚๐Ÿ˜€""" snake_case__ : Any = len(tokenizer.encode(snake_case_ ) ) - 2 snake_case__ : Optional[int] = len(tokenizer.encode(snake_case_ ) ) - 2 snake_case__ : List[str] = [1] + [0] * (len_prefix + len_text + 1) snake_case__ : Optional[int] = [1] * (len_prefix + len_text + 1) + [0] snake_case__ : int = [1] + [1] * (len_prefix) + [0] * (len_text + 1) snake_case__ : Any = tokenizer(prefix_text + input_text ).token_type_ids snake_case__ : str = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids snake_case__ : 
Optional[Any] = tokenizer(snake_case_ , prefix_text=snake_case_ ).token_type_ids self.assertListEqual(snake_case_ , snake_case_ ) self.assertListEqual(snake_case_ , snake_case_ ) self.assertListEqual(snake_case_ , snake_case_ ) @slow def lowerCamelCase ( self : Optional[int] ): snake_case__ : Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) snake_case__ : Union[str, Any] = tokenizer.encode("""ใ‚ใƒณใ„ใƒฏ""" ) snake_case__ : int = tokenizer.encode("""""" , prefix_text="""ใ‚ใƒณใ„ใƒฏ""" ) snake_case__ : Dict = tokenizer.encode("""ใ„ใƒฏ""" , prefix_text="""ใ‚ใƒณ""" ) self.assertEqual(tokenizer.decode(snake_case_ ) , tokenizer.decode(snake_case_ ) ) self.assertEqual(tokenizer.decode(snake_case_ ) , tokenizer.decode(snake_case_ ) ) self.assertNotEqual(snake_case_ , snake_case_ ) self.assertNotEqual(snake_case_ , snake_case_ ) self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token @slow def lowerCamelCase ( self : Any ): snake_case__ : Optional[int] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) snake_case__ : int = [["""ๆญฆ็”ฐไฟก็Ž„""", """ใฏใ€"""], ["""็น”็”ฐไฟก้•ท""", """ใฎ้…ไธ‹ใฎใ€"""]] snake_case__ : Optional[Any] = tokenizer(snake_case_ , padding=snake_case_ ) snake_case__ : Tuple = tokenizer.batch_encode_plus(snake_case_ , padding=snake_case_ ) # fmt: off snake_case__ : Optional[Any] = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]] snake_case__ : Optional[Any] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]] snake_case__ : Optional[Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]] # fmt: on self.assertListEqual(x_token.input_ids , snake_case_ ) self.assertListEqual(x_token.token_type_ids , snake_case_ ) self.assertListEqual(x_token.attention_mask , snake_case_ ) self.assertListEqual(x_token_a.input_ids , snake_case_ ) 
self.assertListEqual(x_token_a.token_type_ids , snake_case_ ) self.assertListEqual(x_token_a.attention_mask , snake_case_ ) def lowerCamelCase ( self : Any ): # Intentionally convert some words to accommodate character fluctuations unique to Japanese pass def lowerCamelCase ( self : List[str] ): # tokenizer has no padding token pass
35
1
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase ( self : Dict ): snake_case__ : Optional[int] = tf.convert_to_tensor( [ [ 8.2220991, # 3rd highest value; idx. 0 -0.5620044, 5.23229752, 4.0386393, -6.8798378, -0.54785802, -3.2012153, 2.92777176, 1.88171953, 7.35341276, # 5th highest value; idx. 9 8.43207833, # 2nd highest value; idx. 10 -9.85711836, -5.96209236, -1.13039161, -7.1115294, -0.8369633, -5.3186408, 7.06427407, 0.81369344, -0.82023817, -5.9179796, 0.58813443, -6.99778438, 4.71551189, -0.18771637, 7.44020759, # 4th highest value; idx. 25 9.38450987, # 1st highest value; idx. 26 2.12662941, -9.32562038, 2.35652522, ], # cummulative prob of 5 highest values <= 0.6 [ 0.58425518, 4.53139238, -5.57510464, -6.28030699, -7.19529503, -4.02122551, 1.39337037, -6.06707057, 1.59480517, -9.643119, 0.03907799, 0.67231762, -8.88206726, 6.27115922, # 4th highest value; idx. 13 2.28520723, 4.82767506, 4.30421368, 8.8275313, # 2nd highest value; idx. 17 5.44029958, # 5th highest value; idx. 18 -4.4735794, 7.38579536, # 3rd highest value; idx. 
20 -2.91051663, 2.61946077, -2.5674762, -9.48959302, -4.02922645, -1.35416918, 9.67702323, # 1st highest value; idx. 27 -5.89478553, 1.85370467, ], # cummulative prob of 5 highest values <= 0.6 ] , dtype=tf.floataa , ) snake_case__ : Optional[Any] = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above snake_case__ : Optional[Any] = tf.convert_to_tensor( [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non filtered values as noted above snake_case__ : str = tf_top_k_top_p_filtering(snake_case_ , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 ) snake_case__ : Optional[int] = output[output != -float("""inf""" )] snake_case__ : int = tf.cast( tf.where(tf.not_equal(snake_case_ , tf.constant(-float("""inf""" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , ) tf.debugging.assert_near(snake_case_ , snake_case_ , rtol=1E-1_2 ) tf.debugging.assert_equal(snake_case_ , snake_case_ ) @require_tf class UpperCAmelCase_ ( unittest.TestCase , _a ): """simple docstring""" if is_tf_available(): lowercase = { "AutoModelForCausalLM": TFAutoModelForCausalLM, "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq, "AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM, "AutoModelForVision2Seq": TFAutoModelForVisionaSeq, "LogitsProcessorList": TFLogitsProcessorList, "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor, "create_tensor_fn": tf.convert_to_tensor, "floats_tensor": floats_tensor, "return_tensors": "tf", } @slow def lowerCamelCase ( self : Any ): # TF-only test: tf.saved_model export snake_case__ : Dict = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) snake_case__ : List[Any] = 2 snake_case__ : Dict = 2 class UpperCAmelCase_ ( tf.Module ): """simple docstring""" def __init__( self : Tuple , snake_case_ : Dict ): super(snake_case_ , self 
).__init__() snake_case__ : List[Any] = model @tf.function( input_signature=( tf.TensorSpec((None, input_length) , tf.intaa , name="""input_ids""" ), tf.TensorSpec((None, input_length) , tf.intaa , name="""attention_mask""" ), ) , jit_compile=snake_case_ , ) def lowerCamelCase ( self : Tuple , snake_case_ : Optional[int] , snake_case_ : Dict ): snake_case__ : Optional[int] = self.model.generate( input_ids=snake_case_ , attention_mask=snake_case_ , max_new_tokens=snake_case_ , return_dict_in_generate=snake_case_ , ) return {"sequences": outputs["sequences"]} snake_case__ : List[str] = [[2, 0], [102, 103]] snake_case__ : Dict = [[1, 0], [1, 1]] snake_case__ : Dict = DummyModel(model=snake_case_ ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(snake_case_ , snake_case_ , signatures={"""serving_default""": dummy_model.serving} ) snake_case__ : str = tf.saved_model.load(snake_case_ ).signatures["""serving_default"""] for batch_size in range(1 , len(snake_case_ ) + 1 ): snake_case__ : Optional[int] = { """input_ids""": tf.constant(dummy_input_ids[:batch_size] ), """attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ), } snake_case__ : Union[str, Any] = serving_func(**snake_case_ )["""sequences"""] snake_case__ : Optional[Any] = test_model.generate(**snake_case_ , max_new_tokens=snake_case_ ) tf.debugging.assert_equal(snake_case_ , snake_case_ ) @slow def lowerCamelCase ( self : str ): # TF-only test: tf.saved_model export snake_case__ : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) snake_case__ : str = 1 snake_case__ : List[Any] = 2 class UpperCAmelCase_ ( tf.Module ): """simple docstring""" def __init__( self : int , snake_case_ : Optional[Any] ): super(snake_case_ , self ).__init__() snake_case__ : str = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None) , tf.intaa , name="""input_ids""" ), tf.TensorSpec((batch_size, None) , tf.intaa , 
# NOTE(review): this chunk starts mid-file — the line below is the tail of a
# @tf.function(input_signature=(...)) decorator whose opening lines sit above
# this view; the enclosing test class and the nested tf.Module are also opened
# above.  The code has been machine-obfuscated: every local is rebound to
# `snake_case__` while later statements still read the original names
# (`outputs`, `dummy_model`, `serving_func`, `test_model`, `complete_model`,
# `keras_model`, `tokenizer`, `model`, `expectation`, `generated_tokens`,
# `bart_tokenizer`, `bart_model`, `FakeBart`, `FakeEncoder`, `fake_encoder`),
# and several `def`s repeat the parameter name `snake_case_` (a SyntaxError in
# Python).  As written this cannot run; the comments below describe the
# evident intent only — confirm every claim against the original file.
name="""attention_mask""" ), ) , jit_compile=snake_case_ , )
            def lowerCamelCase ( self : Union[str, Any] , snake_case_ : int , snake_case_ : List[Any] ):
                # Serving signature: run generate() and expose only the token ids.
                # NOTE(review): result is bound to `snake_case__` but returned as
                # `outputs` — obfuscation damage, see header note.
                snake_case__ : Any = self.model.generate(
                    input_ids=snake_case_ , attention_mask=snake_case_ , max_new_tokens=snake_case_ , return_dict_in_generate=snake_case_ , )
                return {"sequences": outputs["sequences"]}

        # two rows of different lengths — exercises the serving fn per row
        snake_case__ : Optional[Any] = [[2], [102, 103]]
        snake_case__ : int = [[1], [1, 1]]
        snake_case__ : Any = DummyModel(model=snake_case_ )
        # export with the serving signature, reload, and compare the SavedModel
        # signature's output against eager generate() row by row
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(snake_case_ , snake_case_ , signatures={"""serving_default""": dummy_model.serving} )
            snake_case__ : Optional[int] = tf.saved_model.load(snake_case_ ).signatures["""serving_default"""]
            for input_row in range(len(snake_case_ ) ):
                snake_case__ : Any = {
                    """input_ids""": tf.constant([dummy_input_ids[input_row]] ),
                    """attention_mask""": tf.constant([dummy_attention_masks[input_row]] ),
                }
                snake_case__ : Optional[Any] = serving_func(**snake_case_ )["""sequences"""]
                snake_case__ : Optional[int] = test_model.generate(**snake_case_ , max_new_tokens=snake_case_ )
                tf.debugging.assert_equal(snake_case_ , snake_case_ )

    @slow
    @require_tensorflow_text
    def lowerCamelCase ( self : Tuple ):
        # TF-only test: tf.saved_model export
        # End-to-end Keras export: a layer that tokenizes, generates and
        # detokenizes must survive a keras Model save.
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="""google/flan-t5-small""" , filename="""spiece.model""" , local_dir=snake_case_ )

            class UpperCAmelCase_ ( tf.keras.layers.Layer ):
                """simple docstring"""

                def __init__( self : List[Any] ):
                    super().__init__()
                    # sentencepiece tokenizer built from the downloaded spiece.model
                    snake_case__ : Optional[Any] = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(snake_case_ , """spiece.model""" ) , """rb""" ).read() )
                    snake_case__ : Optional[int] = TFAutoModelForSeqaSeqLM.from_pretrained("""hf-internal-testing/tiny-random-t5""" )

                def lowerCamelCase ( self : Dict , snake_case_ : List[str] , *snake_case_ : int , **snake_case_ : Optional[Any] ):
                    # tokenize -> pad to 64 -> generate -> detokenize
                    snake_case__ : str = self.tokenizer.tokenize(snake_case_ )
                    snake_case__ , snake_case__ : int = text.pad_model_inputs(
                        snake_case_ , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
                    snake_case__ : str = self.model.generate(input_ids=snake_case_ , attention_mask=snake_case_ )
                    return self.tokenizer.detokenize(snake_case_ )

            # wrap the layer in a functional Keras model and save it
            snake_case__ : int = CompleteSentenceTransformer()
            snake_case__ : List[str] = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="""inputs""" )
            snake_case__ : List[str] = complete_model(snake_case_ )
            snake_case__ : Optional[int] = tf.keras.Model(snake_case_ , snake_case_ )
            keras_model.save(snake_case_ )

    def lowerCamelCase ( self : int ):
        # Has PT equivalent: this test relies on random sampling
        # With a fixed seed, sampling should stop at the given eos_token_id
        # whether eos is a single id or a list of ids.
        snake_case__ : Dict = {
            """do_sample""": True,
            """num_beams""": 1,
            """top_p""": 0.7,
            """top_k""": 10,
            """temperature""": 0.7,
        }
        snake_case__ : str = 14  # expected generated length — TODO confirm
        snake_case__ : int = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
        snake_case__ : List[str] = """Hello, my dog is cute and"""
        snake_case__ : List[Any] = tokenizer(snake_case_ , return_tensors="""tf""" )
        snake_case__ : Any = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
        snake_case__ : Dict = 638  # id passed as eos_token_id below
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(""":/CPU:0""" ):
            tf.random.set_seed(0 )
            # NOTE(review): `**snake_case_` is expanded twice in one call —
            # obfuscation damage (duplicate-keyword TypeError at runtime).
            snake_case__ : List[str] = model.generate(**snake_case_ , eos_token_id=snake_case_ , **snake_case_ )
        self.assertTrue(expectation == len(generated_tokens[0] ) )
        # a list of candidate EOS ids must behave the same way
        snake_case__ : Any = [638, 198]
        with tf.device(""":/CPU:0""" ):
            tf.random.set_seed(0 )
            snake_case__ : Optional[int] = model.generate(**snake_case_ , eos_token_id=snake_case_ , **snake_case_ )
        self.assertTrue(expectation == len(generated_tokens[0] ) )

    def lowerCamelCase ( self : Union[str, Any] ):
        # Has PT equivalent: ample use of framework-specific code
        # generate() filters model kwargs against the model's call() signature;
        # the two fake subclasses below change only that signature.
        snake_case__ : Union[str, Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
        snake_case__ : int = """Hugging Face is a technology company based in New York and Paris."""
        snake_case__ : Optional[int] = bart_tokenizer(snake_case_ , return_tensors="""tf""" ).input_ids
        snake_case__ : str = TFBartForConditionalGeneration.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
        snake_case__ : Dict = bart_model.generate(snake_case_ ).numpy()

        class UpperCAmelCase_ ( _a ):
            """simple docstring"""

            def lowerCamelCase ( self : List[str] , snake_case_ : Optional[Any] , snake_case_ : Optional[Any]=None , **snake_case_ : Union[str, Any] ):
                # same behaviour as the parent; only the signature differs
                return super().call(snake_case_ , **snake_case_ )

        # unknown kwarg `foo` is filtered out -> output equals the baseline
        snake_case__ : List[Any] = FakeBart.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
        snake_case__ : Any = bart_model.generate(snake_case_ , foo="""bar""" ).numpy()
        self.assertTrue(np.array_equal(snake_case_ , snake_case_ ) )

        class UpperCAmelCase_ ( bart_model.model.encoder.__class__ ):
            """simple docstring"""

            def lowerCamelCase ( self : Union[str, Any] , snake_case_ : Optional[Any] , **snake_case_ : Dict ):
                return super().call(snake_case_ , **snake_case_ )

        snake_case__ : int = FakeEncoder(bart_model.config , bart_model.model.shared )
        snake_case__ : int = fake_encoder
        # Normal generation still works (the output will be different because the encoder weights are different)
        snake_case__ : Tuple = bart_model.generate(snake_case_ ).numpy()
        with self.assertRaises(snake_case_ ):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(snake_case_ , foo="""bar""" )
35
'''Custom "fast" tokenizer paired with the slow ``CustomTokenizer``.'''
from transformers import BertTokenizerFast

from .custom_tokenization import CustomTokenizer


class UpperCAmelCase_ ( _a ):
    """Fast tokenizer whose slow counterpart is :class:`CustomTokenizer`.

    NOTE(review): the base class is the obfuscated name ``_a``; the
    ``BertTokenizerFast`` import above suggests it is meant to be that
    class — TODO confirm against the original file.
    """

    # companion slow tokenizer class (presumably the obfuscated
    # `slow_tokenizer_class` hook — verify against the original file)
    lowercase = CustomTokenizer
    # fix: removed a redundant `pass` — the class body is non-empty
35
1